From bd0b7e8d1c477bb67651f8d63d963f14174b75dd Mon Sep 17 00:00:00 2001 From: adam-fowler Date: Wed, 18 Dec 2024 16:20:41 +0000 Subject: [PATCH] Update models from aws-sdk-go-v2 release-2024-12-17 --- Package.swift | 7 + .../Services/AppSync/AppSync_shapes.swift | 2 +- .../Soto/Services/Artifact/Artifact_api.swift | 76 + .../Services/Artifact/Artifact_shapes.swift | 138 +- Sources/Soto/Services/Athena/Athena_api.swift | 1 + .../BCMPricingCalculator_shapes.swift | 14 +- Sources/Soto/Services/Backup/Backup_api.swift | 184 + .../Soto/Services/Backup/Backup_shapes.swift | 332 +- .../BackupSearch/BackupSearch_api.swift | 551 ++ .../BackupSearch/BackupSearch_shapes.swift | 1202 ++++ Sources/Soto/Services/Batch/Batch_api.swift | 6 +- .../Soto/Services/Batch/Batch_shapes.swift | 62 +- .../Soto/Services/Bedrock/Bedrock_api.swift | 2 + .../BedrockAgentRuntime_shapes.swift | 4 +- .../Soto/Services/Budgets/Budgets_api.swift | 6 +- .../CleanRoomsML/CleanRoomsML_api.swift | 2 +- .../CleanRoomsML/CleanRoomsML_shapes.swift | 7 +- Sources/Soto/Services/Cloud9/Cloud9_api.swift | 56 +- .../Soto/Services/Cloud9/Cloud9_shapes.swift | 2 +- .../CloudFront/CloudFront_shapes.swift | 41 +- .../Services/CloudHSMV2/CloudHSMV2_api.swift | 3 + .../CloudHSMV2/CloudHSMV2_shapes.swift | 29 +- .../CloudTrail/CloudTrail_shapes.swift | 2 +- .../CloudWatchLogs_shapes.swift | 8 +- .../CodePipeline/CodePipeline_api.swift | 6 +- .../CodePipeline/CodePipeline_shapes.swift | 18 +- .../CognitoIdentity/CognitoIdentity_api.swift | 40 + .../CognitoIdentityProvider_api.swift | 540 +- .../CognitoIdentityProvider_shapes.swift | 456 +- .../Soto/Services/Connect/Connect_api.swift | 475 +- .../Services/Connect/Connect_shapes.swift | 798 ++- Sources/Soto/Services/DLM/DLM_api.swift | 8 + Sources/Soto/Services/DLM/DLM_shapes.swift | 43 +- .../DatabaseMigrationService_api.swift | 20 +- .../DatabaseMigrationService_shapes.swift | 139 +- .../DynamoDBStreams/DynamoDBStreams_api.swift | 13 +- 
Sources/Soto/Services/EC2/EC2_api.swift | 86 +- Sources/Soto/Services/EC2/EC2_shapes.swift | 202 +- Sources/Soto/Services/ECS/ECS_api.swift | 2022 ++++--- Sources/Soto/Services/ECS/ECS_shapes.swift | 5160 +++++++++-------- Sources/Soto/Services/EKS/EKS_api.swift | 6 + Sources/Soto/Services/EKS/EKS_shapes.swift | 43 +- .../EMRServerless/EMRServerless_api.swift | 3 + .../EMRServerless/EMRServerless_shapes.swift | 8 +- .../Services/Finspace/Finspace_shapes.swift | 2 +- Sources/Soto/Services/Glue/Glue_api.swift | 30 +- Sources/Soto/Services/Glue/Glue_shapes.swift | 55 +- .../GreengrassV2/GreengrassV2_api.swift | 11 +- .../GreengrassV2/GreengrassV2_shapes.swift | 28 +- .../Services/GuardDuty/GuardDuty_api.swift | 2 +- .../Services/GuardDuty/GuardDuty_shapes.swift | 12 +- .../IVSRealTime/IVSRealTime_shapes.swift | 89 +- .../InternetMonitor/InternetMonitor_api.swift | 58 +- .../Services/Keyspaces/Keyspaces_api.swift | 4 +- .../LakeFormation/LakeFormation_api.swift | 40 +- Sources/Soto/Services/M2/M2_api.swift | 3 + Sources/Soto/Services/M2/M2_shapes.swift | 24 +- .../MarketplaceAgreement_api.swift | 7 + .../MediaConnect/MediaConnect_shapes.swift | 29 +- .../Services/MediaLive/MediaLive_api.swift | 3 - .../Services/MediaLive/MediaLive_shapes.swift | 75 +- .../MigrationHub/MigrationHub_api.swift | 264 +- .../MigrationHub/MigrationHub_shapes.swift | 259 +- .../NetworkManager/NetworkManager_api.swift | 2 +- .../NetworkManager_shapes.swift | 2 +- .../Organizations/Organizations_api.swift | 2 + Sources/Soto/Services/RDS/RDS_shapes.swift | 1 + .../Route53Domains_shapes.swift | 10 +- Sources/Soto/Services/SESv2/SESv2_api.swift | 175 + .../Soto/Services/SESv2/SESv2_shapes.swift | 323 +- .../ServiceDiscovery_api.swift | 99 +- .../ServiceDiscovery_shapes.swift | 122 +- .../Synthetics/Synthetics_shapes.swift | 28 +- .../TimestreamInfluxDB_api.swift | 7 +- .../TimestreamInfluxDB_shapes.swift | 56 +- .../TrustedAdvisor/TrustedAdvisor_api.swift | 7 + 
.../WorkSpaces/WorkSpaces_shapes.swift | 2 +- models/account.json | 797 +-- models/application-auto-scaling.json | 2 +- models/appsync.json | 2 +- models/artifact.json | 418 +- models/backup.json | 531 ++ models/backupsearch.json | 2820 +++++++++ models/batch.json | 99 +- models/bcm-pricing-calculator.json | 9 +- models/cleanroomsml.json | 5 +- models/cloud9.json | 76 +- models/cloudfront.json | 18 +- models/cloudhsm-v2.json | 86 +- models/cloudtrail.json | 4 +- models/cloudwatch-logs.json | 4 +- models/codepipeline.json | 18 +- models/cognito-identity-provider.json | 543 +- models/connect.json | 1350 ++++- models/controlcatalog.json | 2 +- models/database-migration-service.json | 187 +- models/dlm.json | 26 +- models/dsql.json | 8 +- models/ec2.json | 313 +- models/ecs.json | 1454 ++--- models/eks.json | 50 +- models/emr-serverless.json | 9 +- models/endpoints/endpoints.json | 884 ++- models/finspace.json | 2 +- models/glue.json | 71 +- models/greengrassv2.json | 42 +- models/guardduty.json | 18 +- models/ivs-realtime.json | 156 +- models/keyspaces.json | 2 +- models/m2.json | 33 + models/mediaconnect.json | 33 + models/medialive.json | 121 +- models/migration-hub.json | 509 +- models/networkmanager.json | 2 +- models/rds.json | 6 + models/route-53-domains.json | 18 +- models/servicediscovery.json | 291 +- models/sesv2.json | 1109 +++- models/synthetics.json | 24 +- models/timestream-influxdb.json | 72 +- models/workspaces.json | 2 +- 121 files changed, 20788 insertions(+), 6092 deletions(-) create mode 100644 Sources/Soto/Services/BackupSearch/BackupSearch_api.swift create mode 100644 Sources/Soto/Services/BackupSearch/BackupSearch_shapes.swift create mode 100644 models/backupsearch.json diff --git a/Package.swift b/Package.swift index 6339f5abce..e3ae39bda0 100644 --- a/Package.swift +++ b/Package.swift @@ -65,6 +65,7 @@ let package = Package( .library(name: "SotoBCMPricingCalculator", targets: ["SotoBCMPricingCalculator"]), .library(name: "SotoBackup", targets: 
["SotoBackup"]), .library(name: "SotoBackupGateway", targets: ["SotoBackupGateway"]), + .library(name: "SotoBackupSearch", targets: ["SotoBackupSearch"]), .library(name: "SotoBatch", targets: ["SotoBatch"]), .library(name: "SotoBedrock", targets: ["SotoBedrock"]), .library(name: "SotoBedrockAgent", targets: ["SotoBedrockAgent"]), @@ -658,6 +659,12 @@ let package = Package( path: "./Sources/Soto/Services/BackupGateway", swiftSettings: swiftSettings ), + .target( + name: "SotoBackupSearch", + dependencies: [.product(name: "SotoCore", package: "soto-core")], + path: "./Sources/Soto/Services/BackupSearch", + swiftSettings: swiftSettings + ), .target( name: "SotoBatch", dependencies: [.product(name: "SotoCore", package: "soto-core")], diff --git a/Sources/Soto/Services/AppSync/AppSync_shapes.swift b/Sources/Soto/Services/AppSync/AppSync_shapes.swift index cc2e35d9ef..23af3d19f1 100644 --- a/Sources/Soto/Services/AppSync/AppSync_shapes.swift +++ b/Sources/Soto/Services/AppSync/AppSync_shapes.swift @@ -1592,7 +1592,7 @@ extension AppSync { public let relationalDatabaseConfig: RelationalDatabaseDataSourceConfig? /// The Identity and Access Management (IAM) service role Amazon Resource Name (ARN) for the data source. The system assumes this role when accessing the data source. public let serviceRoleArn: String? - /// The type of the data source. AWS_LAMBDA: The data source is an Lambda function. AMAZON_DYNAMODB: The data source is an Amazon DynamoDB table. AMAZON_ELASTICSEARCH: The data source is an Amazon OpenSearch Service domain. AMAZON_OPENSEARCH_SERVICE: The data source is an Amazon OpenSearch Service domain. AMAZON_EVENTBRIDGE: The data source is an Amazon EventBridge configuration. NONE: There is no data source. Use this type when you want to invoke a GraphQL operation without connecting to a data source, such as when you're performing data transformation with resolvers or invoking a subscription from a mutation. HTTP: The data source is an HTTP endpoint. 
RELATIONAL_DATABASE: The data source is a relational database. + /// The type of the data source. AWS_LAMBDA: The data source is an Lambda function. AMAZON_DYNAMODB: The data source is an Amazon DynamoDB table. AMAZON_ELASTICSEARCH: The data source is an Amazon OpenSearch Service domain. AMAZON_OPENSEARCH_SERVICE: The data source is an Amazon OpenSearch Service domain. AMAZON_EVENTBRIDGE: The data source is an Amazon EventBridge configuration. AMAZON_BEDROCK_RUNTIME: The data source is the Amazon Bedrock runtime. NONE: There is no data source. Use this type when you want to invoke a GraphQL operation without connecting to a data source, such as when you're performing data transformation with resolvers or invoking a subscription from a mutation. HTTP: The data source is an HTTP endpoint. RELATIONAL_DATABASE: The data source is a relational database. public let type: DataSourceType? @inlinable diff --git a/Sources/Soto/Services/Artifact/Artifact_api.swift b/Sources/Soto/Services/Artifact/Artifact_api.swift index 698fc31203..2781a17651 100644 --- a/Sources/Soto/Services/Artifact/Artifact_api.swift +++ b/Sources/Soto/Services/Artifact/Artifact_api.swift @@ -204,6 +204,38 @@ public struct Artifact: AWSService { return try await self.getTermForReport(input, logger: logger) } + /// List active customer-agreements applicable to calling identity. + @Sendable + @inlinable + public func listCustomerAgreements(_ input: ListCustomerAgreementsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListCustomerAgreementsResponse { + try await self.client.execute( + operation: "ListCustomerAgreements", + path: "/v1/customer-agreement/list", + httpMethod: .GET, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// List active customer-agreements applicable to calling identity. + /// + /// Parameters: + /// - maxResults: Maximum number of resources to return in the paginated response. 
+ /// - nextToken: Pagination token to request the next page of resources. + /// - logger: Logger use during operation + @inlinable + public func listCustomerAgreements( + maxResults: Int? = nil, + nextToken: String? = nil, + logger: Logger = AWSClient.loggingDisabled + ) async throws -> ListCustomerAgreementsResponse { + let input = ListCustomerAgreementsRequest( + maxResults: maxResults, + nextToken: nextToken + ) + return try await self.listCustomerAgreements(input, logger: logger) + } + /// List available reports. @Sendable @inlinable @@ -279,6 +311,40 @@ extension Artifact { @available(macOS 10.15, iOS 13.0, tvOS 13.0, watchOS 6.0, *) extension Artifact { + /// Return PaginatorSequence for operation ``listCustomerAgreements(_:logger:)``. + /// + /// - Parameters: + /// - input: Input for operation + /// - logger: Logger used for logging + @inlinable + public func listCustomerAgreementsPaginator( + _ input: ListCustomerAgreementsRequest, + logger: Logger = AWSClient.loggingDisabled + ) -> AWSClient.PaginatorSequence { + return .init( + input: input, + command: self.listCustomerAgreements, + inputKey: \ListCustomerAgreementsRequest.nextToken, + outputKey: \ListCustomerAgreementsResponse.nextToken, + logger: logger + ) + } + /// Return PaginatorSequence for operation ``listCustomerAgreements(_:logger:)``. + /// + /// - Parameters: + /// - maxResults: Maximum number of resources to return in the paginated response. + /// - logger: Logger used for logging + @inlinable + public func listCustomerAgreementsPaginator( + maxResults: Int? = nil, + logger: Logger = AWSClient.loggingDisabled + ) -> AWSClient.PaginatorSequence { + let input = ListCustomerAgreementsRequest( + maxResults: maxResults + ) + return self.listCustomerAgreementsPaginator(input, logger: logger) + } + /// Return PaginatorSequence for operation ``listReports(_:logger:)``. 
/// /// - Parameters: @@ -314,6 +380,16 @@ extension Artifact { } } +extension Artifact.ListCustomerAgreementsRequest: AWSPaginateToken { + @inlinable + public func usingPaginationToken(_ token: String) -> Artifact.ListCustomerAgreementsRequest { + return .init( + maxResults: self.maxResults, + nextToken: token + ) + } +} + extension Artifact.ListReportsRequest: AWSPaginateToken { @inlinable public func usingPaginationToken(_ token: String) -> Artifact.ListReportsRequest { diff --git a/Sources/Soto/Services/Artifact/Artifact_shapes.swift b/Sources/Soto/Services/Artifact/Artifact_shapes.swift index 013090686d..0b4cdd8989 100644 --- a/Sources/Soto/Services/Artifact/Artifact_shapes.swift +++ b/Sources/Soto/Services/Artifact/Artifact_shapes.swift @@ -27,28 +27,37 @@ extension Artifact { // MARK: Enums public enum AcceptanceType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { - /// Require explicit click-through acceptance of - /// the Term associated with this Report. + /// Require explicit click-through acceptance of the + /// Term associated with this Report. case explicit = "EXPLICIT" - /// Do not require explicit click-through - /// acceptance of the Term associated with - /// this Report. 
+ /// Do not require explicit click-through acceptance + /// of the Term associated with this Report case passthrough = "PASSTHROUGH" public var description: String { return self.rawValue } } + public enum AgreementType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case `default` = "DEFAULT" + case custom = "CUSTOM" + case modified = "MODIFIED" + public var description: String { return self.rawValue } + } + + public enum CustomerAgreementState: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case active = "ACTIVE" + case awsTerminated = "AWS_TERMINATED" + case customerTerminated = "CUSTOMER_TERMINATED" + public var description: String { return self.rawValue } + } + public enum NotificationSubscriptionStatus: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { - /// The account is not subscribed for notification. case notSubscribed = "NOT_SUBSCRIBED" - /// The account is subscribed for notification. case subscribed = "SUBSCRIBED" public var description: String { return self.rawValue } } public enum PublishedState: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { - /// The resource is published for consumption. case published = "PUBLISHED" - /// The resource is not published for consumption. case unpublished = "UNPUBLISHED" public var description: String { return self.rawValue } } @@ -77,6 +86,70 @@ extension Artifact { } } + public struct CustomerAgreementSummary: AWSDecodableShape { + /// Terms required to accept the agreement resource. + public let acceptanceTerms: [String]? + /// ARN of the agreement resource the customer-agreement resource represents. + public let agreementArn: String? + /// ARN of the customer-agreement resource. + public let arn: String? + /// AWS account Id that owns the resource. + public let awsAccountId: String? + /// Description of the resource. + public let description: String? 
+ /// Timestamp indicating when the agreement was terminated. + @OptionalCustomCoding + public var effectiveEnd: Date? + /// Timestamp indicating when the agreement became effective. + @OptionalCustomCoding + public var effectiveStart: Date? + /// Identifier of the customer-agreement resource. + public let id: String? + /// Name of the customer-agreement resource. + public let name: String? + /// ARN of the organization that owns the resource. + public let organizationArn: String? + /// State of the resource. + public let state: CustomerAgreementState? + /// Terms required to terminate the customer-agreement resource. + public let terminateTerms: [String]? + /// Type of the customer-agreement resource. + public let type: AgreementType? + + @inlinable + public init(acceptanceTerms: [String]? = nil, agreementArn: String? = nil, arn: String? = nil, awsAccountId: String? = nil, description: String? = nil, effectiveEnd: Date? = nil, effectiveStart: Date? = nil, id: String? = nil, name: String? = nil, organizationArn: String? = nil, state: CustomerAgreementState? = nil, terminateTerms: [String]? = nil, type: AgreementType? 
= nil) { + self.acceptanceTerms = acceptanceTerms + self.agreementArn = agreementArn + self.arn = arn + self.awsAccountId = awsAccountId + self.description = description + self.effectiveEnd = effectiveEnd + self.effectiveStart = effectiveStart + self.id = id + self.name = name + self.organizationArn = organizationArn + self.state = state + self.terminateTerms = terminateTerms + self.type = type + } + + private enum CodingKeys: String, CodingKey { + case acceptanceTerms = "acceptanceTerms" + case agreementArn = "agreementArn" + case arn = "arn" + case awsAccountId = "awsAccountId" + case description = "description" + case effectiveEnd = "effectiveEnd" + case effectiveStart = "effectiveStart" + case id = "id" + case name = "name" + case organizationArn = "organizationArn" + case state = "state" + case terminateTerms = "terminateTerms" + case type = "type" + } + } + public struct GetAccountSettingsRequest: AWSEncodableShape { public init() {} } @@ -228,6 +301,53 @@ extension Artifact { } } + public struct ListCustomerAgreementsRequest: AWSEncodableShape { + /// Maximum number of resources to return in the paginated response. + public let maxResults: Int? + /// Pagination token to request the next page of resources. + public let nextToken: String? + + @inlinable + public init(maxResults: Int? = nil, nextToken: String? = nil) { + self.maxResults = maxResults + self.nextToken = nextToken + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! 
RequestEncodingContainer + _ = encoder.container(keyedBy: CodingKeys.self) + request.encodeQuery(self.maxResults, key: "maxResults") + request.encodeQuery(self.nextToken, key: "nextToken") + } + + public func validate(name: String) throws { + try self.validate(self.maxResults, name: "maxResults", parent: name, max: 300) + try self.validate(self.maxResults, name: "maxResults", parent: name, min: 1) + try self.validate(self.nextToken, name: "nextToken", parent: name, max: 2048) + try self.validate(self.nextToken, name: "nextToken", parent: name, min: 1) + } + + private enum CodingKeys: CodingKey {} + } + + public struct ListCustomerAgreementsResponse: AWSDecodableShape { + /// List of customer-agreement resources. + public let customerAgreements: [CustomerAgreementSummary] + /// Pagination token to request the next page of resources. + public let nextToken: String? + + @inlinable + public init(customerAgreements: [CustomerAgreementSummary], nextToken: String? = nil) { + self.customerAgreements = customerAgreements + self.nextToken = nextToken + } + + private enum CodingKeys: String, CodingKey { + case customerAgreements = "customerAgreements" + case nextToken = "nextToken" + } + } + public struct ListReportsRequest: AWSEncodableShape { /// Maximum number of resources to return in the paginated response. public let maxResults: Int? 
diff --git a/Sources/Soto/Services/Athena/Athena_api.swift b/Sources/Soto/Services/Athena/Athena_api.swift index 28ab492965..c4128e21d2 100644 --- a/Sources/Soto/Services/Athena/Athena_api.swift +++ b/Sources/Soto/Services/Athena/Athena_api.swift @@ -92,6 +92,7 @@ public struct Athena: AWSService { "ap-southeast-2": "athena.ap-southeast-2.api.aws", "ap-southeast-3": "athena.ap-southeast-3.api.aws", "ap-southeast-4": "athena.ap-southeast-4.api.aws", + "ap-southeast-5": "athena.ap-southeast-5.api.aws", "ca-central-1": "athena.ca-central-1.api.aws", "ca-west-1": "athena.ca-west-1.api.aws", "cn-north-1": "athena.cn-north-1.api.amazonwebservices.com.cn", diff --git a/Sources/Soto/Services/BCMPricingCalculator/BCMPricingCalculator_shapes.swift b/Sources/Soto/Services/BCMPricingCalculator/BCMPricingCalculator_shapes.swift index a890cd40df..9338d244cd 100644 --- a/Sources/Soto/Services/BCMPricingCalculator/BCMPricingCalculator_shapes.swift +++ b/Sources/Soto/Services/BCMPricingCalculator/BCMPricingCalculator_shapes.swift @@ -497,21 +497,21 @@ extension BCMPricingCalculator { public func validate(name: String) throws { try self.validate(self.availabilityZone, name: "availabilityZone", parent: name, max: 32) - try self.validate(self.availabilityZone, name: "availabilityZone", parent: name, pattern: "^[-a-zA-Z0-9\\.\\-_:,]*$") + try self.validate(self.availabilityZone, name: "availabilityZone", parent: name, pattern: "^[-a-zA-Z0-9\\.\\-_:, \\/()]*$") try self.validate(self.group, name: "group", parent: name, max: 30) try self.validate(self.group, name: "group", parent: name, pattern: "^[a-zA-Z0-9-]*$") try self.historicalUsage?.validate(name: "\(name).historicalUsage") try self.validate(self.key, name: "key", parent: name, max: 10) try self.validate(self.key, name: "key", parent: name, pattern: "^[a-zA-Z0-9]*$") try self.validate(self.operation, name: "operation", parent: name, max: 32) - try self.validate(self.operation, name: "operation", parent: name, pattern: 
"^[-a-zA-Z0-9\\.\\-_:,]*$") + try self.validate(self.operation, name: "operation", parent: name, pattern: "^[-a-zA-Z0-9\\.\\-_:, \\/()]*$") try self.validate(self.serviceCode, name: "serviceCode", parent: name, max: 32) try self.validate(self.serviceCode, name: "serviceCode", parent: name, pattern: "^[-a-zA-Z0-9\\.\\-_:, \\/()]*$") try self.validate(self.usageAccountId, name: "usageAccountId", parent: name, max: 12) try self.validate(self.usageAccountId, name: "usageAccountId", parent: name, min: 12) try self.validate(self.usageAccountId, name: "usageAccountId", parent: name, pattern: "^\\d{12}$") try self.validate(self.usageType, name: "usageType", parent: name, max: 128) - try self.validate(self.usageType, name: "usageType", parent: name, pattern: "^[-a-zA-Z0-9\\.\\-_:,]*$") + try self.validate(self.usageType, name: "usageType", parent: name, pattern: "^[-a-zA-Z0-9\\.\\-_:, \\/()]*$") } private enum CodingKeys: String, CodingKey { @@ -701,14 +701,14 @@ extension BCMPricingCalculator { try self.validate(self.key, name: "key", parent: name, max: 10) try self.validate(self.key, name: "key", parent: name, pattern: "^[a-zA-Z0-9]*$") try self.validate(self.operation, name: "operation", parent: name, max: 32) - try self.validate(self.operation, name: "operation", parent: name, pattern: "^[-a-zA-Z0-9\\.\\-_:,]*$") + try self.validate(self.operation, name: "operation", parent: name, pattern: "^[-a-zA-Z0-9\\.\\-_:, \\/()]*$") try self.validate(self.serviceCode, name: "serviceCode", parent: name, max: 32) try self.validate(self.serviceCode, name: "serviceCode", parent: name, pattern: "^[-a-zA-Z0-9\\.\\-_:, \\/()]*$") try self.validate(self.usageAccountId, name: "usageAccountId", parent: name, max: 12) try self.validate(self.usageAccountId, name: "usageAccountId", parent: name, min: 12) try self.validate(self.usageAccountId, name: "usageAccountId", parent: name, pattern: "^\\d{12}$") try self.validate(self.usageType, name: "usageType", parent: name, max: 128) - try 
self.validate(self.usageType, name: "usageType", parent: name, pattern: "^[-a-zA-Z0-9\\.\\-_:,]*$") + try self.validate(self.usageType, name: "usageType", parent: name, pattern: "^[-a-zA-Z0-9\\.\\-_:, \\/()]*$") } private enum CodingKeys: String, CodingKey { @@ -2439,14 +2439,14 @@ extension BCMPricingCalculator { public func validate(name: String) throws { try self.validate(self.operation, name: "operation", parent: name, max: 32) - try self.validate(self.operation, name: "operation", parent: name, pattern: "^[-a-zA-Z0-9\\.\\-_:,]*$") + try self.validate(self.operation, name: "operation", parent: name, pattern: "^[-a-zA-Z0-9\\.\\-_:, \\/()]*$") try self.validate(self.serviceCode, name: "serviceCode", parent: name, max: 32) try self.validate(self.serviceCode, name: "serviceCode", parent: name, pattern: "^[-a-zA-Z0-9\\.\\-_:, \\/()]*$") try self.validate(self.usageAccountId, name: "usageAccountId", parent: name, max: 12) try self.validate(self.usageAccountId, name: "usageAccountId", parent: name, min: 12) try self.validate(self.usageAccountId, name: "usageAccountId", parent: name, pattern: "^\\d{12}$") try self.validate(self.usageType, name: "usageType", parent: name, max: 128) - try self.validate(self.usageType, name: "usageType", parent: name, pattern: "^[-a-zA-Z0-9\\.\\-_:,]*$") + try self.validate(self.usageType, name: "usageType", parent: name, pattern: "^[-a-zA-Z0-9\\.\\-_:, \\/()]*$") } private enum CodingKeys: String, CodingKey { diff --git a/Sources/Soto/Services/Backup/Backup_api.swift b/Sources/Soto/Services/Backup/Backup_api.swift index 55add58715..bc01d07617 100644 --- a/Sources/Soto/Services/Backup/Backup_api.swift +++ b/Sources/Soto/Services/Backup/Backup_api.swift @@ -1411,6 +1411,38 @@ public struct Backup: AWSService { return try await self.getLegalHold(input, logger: logger) } + /// This operation returns the metadata and details specific to the backup index associated with the specified recovery point. 
+ @Sendable + @inlinable + public func getRecoveryPointIndexDetails(_ input: GetRecoveryPointIndexDetailsInput, logger: Logger = AWSClient.loggingDisabled) async throws -> GetRecoveryPointIndexDetailsOutput { + try await self.client.execute( + operation: "GetRecoveryPointIndexDetails", + path: "/backup-vaults/{BackupVaultName}/recovery-points/{RecoveryPointArn}/index", + httpMethod: .GET, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// This operation returns the metadata and details specific to the backup index associated with the specified recovery point. + /// + /// Parameters: + /// - backupVaultName: The name of a logical container where backups are stored. Backup vaults are identified by names that are unique to the account used to create them and the Region where they are created. Accepted characters include lowercase letters, numbers, and hyphens. + /// - recoveryPointArn: An ARN that uniquely identifies a recovery point; for example, arn:aws:backup:us-east-1:123456789012:recovery-point:1EB3B5E7-9EB0-435A-A80B-108B488B0D45. + /// - logger: Logger use during operation + @inlinable + public func getRecoveryPointIndexDetails( + backupVaultName: String, + recoveryPointArn: String, + logger: Logger = AWSClient.loggingDisabled + ) async throws -> GetRecoveryPointIndexDetailsOutput { + let input = GetRecoveryPointIndexDetailsInput( + backupVaultName: backupVaultName, + recoveryPointArn: recoveryPointArn + ) + return try await self.getRecoveryPointIndexDetails(input, logger: logger) + } + /// Returns a set of metadata key-value pairs that were used to create the backup. @Sendable @inlinable @@ -2015,6 +2047,53 @@ public struct Backup: AWSService { return try await self.listFrameworks(input, logger: logger) } + /// This operation returns a list of recovery points that have an associated index, belonging to the specified account. 
Optional parameters you can include are: MaxResults; NextToken; SourceResourceArns; CreatedBefore; CreatedAfter; and ResourceType. + @Sendable + @inlinable + public func listIndexedRecoveryPoints(_ input: ListIndexedRecoveryPointsInput, logger: Logger = AWSClient.loggingDisabled) async throws -> ListIndexedRecoveryPointsOutput { + try await self.client.execute( + operation: "ListIndexedRecoveryPoints", + path: "/indexes/recovery-point", + httpMethod: .GET, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// This operation returns a list of recovery points that have an associated index, belonging to the specified account. Optional parameters you can include are: MaxResults; NextToken; SourceResourceArns; CreatedBefore; CreatedAfter; and ResourceType. + /// + /// Parameters: + /// - createdAfter: Returns only indexed recovery points that were created after the specified date. + /// - createdBefore: Returns only indexed recovery points that were created before the specified date. + /// - indexStatus: Include this parameter to filter the returned list by the indicated statuses. Accepted values: PENDING | ACTIVE | FAILED | DELETING A recovery point with an index that has the status of ACTIVE can be included in a search. + /// - maxResults: The maximum number of resource list items to be returned. + /// - nextToken: The next item following a partial list of returned recovery points. For example, if a request is made to return MaxResults number of indexed recovery points, NextToken allows you to return more items in your list starting at the location pointed to by the next token. + /// - resourceType: Returns a list of indexed recovery points for the specified resource type(s). Accepted values include: EBS for Amazon Elastic Block Store S3 for Amazon Simple Storage Service (Amazon S3) + /// - sourceResourceArn: A string of the Amazon Resource Name (ARN) that uniquely identifies the source resource. 
+ /// - logger: Logger use during operation + @inlinable + public func listIndexedRecoveryPoints( + createdAfter: Date? = nil, + createdBefore: Date? = nil, + indexStatus: IndexStatus? = nil, + maxResults: Int? = nil, + nextToken: String? = nil, + resourceType: String? = nil, + sourceResourceArn: String? = nil, + logger: Logger = AWSClient.loggingDisabled + ) async throws -> ListIndexedRecoveryPointsOutput { + let input = ListIndexedRecoveryPointsInput( + createdAfter: createdAfter, + createdBefore: createdBefore, + indexStatus: indexStatus, + maxResults: maxResults, + nextToken: nextToken, + resourceType: resourceType, + sourceResourceArn: sourceResourceArn + ) + return try await self.listIndexedRecoveryPoints(input, logger: logger) + } + /// This action returns metadata about active and previous legal holds. @Sendable @inlinable @@ -2729,6 +2808,7 @@ public struct Backup: AWSService { /// - completeWindowMinutes: A value in minutes during which a successfully started backup must complete, or else Backup will cancel the job. This value is optional. This value begins counting down from when the backup was scheduled. It does not add additional time for StartWindowMinutes, or if the backup started later than scheduled. Like StartWindowMinutes, this parameter has a maximum value of 100 years (52,560,000 minutes). /// - iamRoleArn: Specifies the IAM role ARN used to create the target recovery point; for example, arn:aws:iam::123456789012:role/S3Access. /// - idempotencyToken: A customer-chosen string that you can use to distinguish between otherwise identical calls to StartBackupJob. Retrying a successful request with the same idempotency token results in a success message with no action taken. + /// - index: Include this parameter to enable index creation if your backup job has a resource type that supports backup indexes. 
Resource types that support backup indexes include: EBS for Amazon Elastic Block Store S3 for Amazon Simple Storage Service (Amazon S3) Index can have 1 of 2 possible values, either ENABLED or DISABLED. To create a backup index for an eligible ACTIVE recovery point that does not yet have a backup index, set value to ENABLED. To delete a backup index, set value to DISABLED. /// - lifecycle: The lifecycle defines when a protected resource is transitioned to cold storage and when it expires. Backup will transition and expire backups automatically according to the lifecycle that you define. Backups transitioned to cold storage must be stored in cold storage for a minimum of 90 days. Therefore, the “retention” setting must be 90 days greater than the “transition to cold after days” setting. The “transition to cold after days” setting cannot be changed after a backup has been transitioned to cold. Resource types that can transition to cold storage are listed in the Feature availability by resource table. Backup ignores this expression for other resource types. This parameter has a maximum value of 100 years (36,500 days). /// - recoveryPointTags: The tags to assign to the resources. /// - resourceArn: An Amazon Resource Name (ARN) that uniquely identifies a resource. The format of the ARN depends on the resource type. @@ -2741,6 +2821,7 @@ public struct Backup: AWSService { completeWindowMinutes: Int64? = nil, iamRoleArn: String, idempotencyToken: String? = nil, + index: Index? = nil, lifecycle: Lifecycle? = nil, recoveryPointTags: [String: String]? 
= nil, resourceArn: String, @@ -2753,6 +2834,7 @@ public struct Backup: AWSService { completeWindowMinutes: completeWindowMinutes, iamRoleArn: iamRoleArn, idempotencyToken: idempotencyToken, + index: index, lifecycle: lifecycle, recoveryPointTags: recoveryPointTags, resourceArn: resourceArn, @@ -3073,6 +3155,44 @@ public struct Backup: AWSService { return try await self.updateGlobalSettings(input, logger: logger) } + /// This operation updates the settings of a recovery point index. Required: BackupVaultName, RecoveryPointArn, and IAMRoleArn + @Sendable + @inlinable + public func updateRecoveryPointIndexSettings(_ input: UpdateRecoveryPointIndexSettingsInput, logger: Logger = AWSClient.loggingDisabled) async throws -> UpdateRecoveryPointIndexSettingsOutput { + try await self.client.execute( + operation: "UpdateRecoveryPointIndexSettings", + path: "/backup-vaults/{BackupVaultName}/recovery-points/{RecoveryPointArn}/index", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// This operation updates the settings of a recovery point index. Required: BackupVaultName, RecoveryPointArn, and IAMRoleArn + /// + /// Parameters: + /// - backupVaultName: The name of a logical container where backups are stored. Backup vaults are identified by names that are unique to the account used to create them and the Region where they are created. Accepted characters include lowercase letters, numbers, and hyphens. + /// - iamRoleArn: This specifies the IAM role ARN used for this operation. For example, arn:aws:iam::123456789012:role/S3Access + /// - index: Index can have 1 of 2 possible values, either ENABLED or DISABLED. To create a backup index for an eligible ACTIVE recovery point that does not yet have a backup index, set value to ENABLED. To delete a backup index, set value to DISABLED. 
+ /// - recoveryPointArn: An ARN that uniquely identifies a recovery point; for example, arn:aws:backup:us-east-1:123456789012:recovery-point:1EB3B5E7-9EB0-435A-A80B-108B488B0D45. + /// - logger: Logger use during operation + @inlinable + public func updateRecoveryPointIndexSettings( + backupVaultName: String, + iamRoleArn: String? = nil, + index: Index, + recoveryPointArn: String, + logger: Logger = AWSClient.loggingDisabled + ) async throws -> UpdateRecoveryPointIndexSettingsOutput { + let input = UpdateRecoveryPointIndexSettingsInput( + backupVaultName: backupVaultName, + iamRoleArn: iamRoleArn, + index: index, + recoveryPointArn: recoveryPointArn + ) + return try await self.updateRecoveryPointIndexSettings(input, logger: logger) + } + /// Sets the transition lifecycle of a recovery point. The lifecycle defines when a protected resource is transitioned to cold storage and when it expires. Backup transitions and expires backups automatically according to the lifecycle that you define. Resource types that can transition to cold storage are listed in the Feature availability by resource table. Backup ignores this expression for other resource types. Backups transitioned to cold storage must be stored in cold storage for a minimum of 90 days. Therefore, the “retention” setting must be 90 days greater than the “transition to cold after days” setting. The “transition to cold after days” setting cannot be changed after a backup has been transitioned to cold. If your lifecycle currently uses the parameters DeleteAfterDays and MoveToColdStorageAfterDays, include these parameters and their values when you call this operation. Not including them may result in your plan updating with null values. This operation does not support continuous backups. @Sendable @inlinable @@ -3713,6 +3833,55 @@ extension Backup { return self.listFrameworksPaginator(input, logger: logger) } + /// Return PaginatorSequence for operation ``listIndexedRecoveryPoints(_:logger:)``. 
+ /// + /// - Parameters: + /// - input: Input for operation + /// - logger: Logger used for logging + @inlinable + public func listIndexedRecoveryPointsPaginator( + _ input: ListIndexedRecoveryPointsInput, + logger: Logger = AWSClient.loggingDisabled + ) -> AWSClient.PaginatorSequence { + return .init( + input: input, + command: self.listIndexedRecoveryPoints, + inputKey: \ListIndexedRecoveryPointsInput.nextToken, + outputKey: \ListIndexedRecoveryPointsOutput.nextToken, + logger: logger + ) + } + /// Return PaginatorSequence for operation ``listIndexedRecoveryPoints(_:logger:)``. + /// + /// - Parameters: + /// - createdAfter: Returns only indexed recovery points that were created after the specified date. + /// - createdBefore: Returns only indexed recovery points that were created before the specified date. + /// - indexStatus: Include this parameter to filter the returned list by the indicated statuses. Accepted values: PENDING | ACTIVE | FAILED | DELETING A recovery point with an index that has the status of ACTIVE can be included in a search. + /// - maxResults: The maximum number of resource list items to be returned. + /// - resourceType: Returns a list of indexed recovery points for the specified resource type(s). Accepted values include: EBS for Amazon Elastic Block Store S3 for Amazon Simple Storage Service (Amazon S3) + /// - sourceResourceArn: A string of the Amazon Resource Name (ARN) that uniquely identifies the source resource. + /// - logger: Logger used for logging + @inlinable + public func listIndexedRecoveryPointsPaginator( + createdAfter: Date? = nil, + createdBefore: Date? = nil, + indexStatus: IndexStatus? = nil, + maxResults: Int? = nil, + resourceType: String? = nil, + sourceResourceArn: String? 
= nil, + logger: Logger = AWSClient.loggingDisabled + ) -> AWSClient.PaginatorSequence { + let input = ListIndexedRecoveryPointsInput( + createdAfter: createdAfter, + createdBefore: createdBefore, + indexStatus: indexStatus, + maxResults: maxResults, + resourceType: resourceType, + sourceResourceArn: sourceResourceArn + ) + return self.listIndexedRecoveryPointsPaginator(input, logger: logger) + } + /// Return PaginatorSequence for operation ``listLegalHolds(_:logger:)``. /// /// - Parameters: @@ -4432,6 +4601,21 @@ extension Backup.ListFrameworksInput: AWSPaginateToken { } } +extension Backup.ListIndexedRecoveryPointsInput: AWSPaginateToken { + @inlinable + public func usingPaginationToken(_ token: String) -> Backup.ListIndexedRecoveryPointsInput { + return .init( + createdAfter: self.createdAfter, + createdBefore: self.createdBefore, + indexStatus: self.indexStatus, + maxResults: self.maxResults, + nextToken: token, + resourceType: self.resourceType, + sourceResourceArn: self.sourceResourceArn + ) + } +} + extension Backup.ListLegalHoldsInput: AWSPaginateToken { @inlinable public func usingPaginationToken(_ token: String) -> Backup.ListLegalHoldsInput { diff --git a/Sources/Soto/Services/Backup/Backup_shapes.swift b/Sources/Soto/Services/Backup/Backup_shapes.swift index b335e4ecf1..f8b70d9b4b 100644 --- a/Sources/Soto/Services/Backup/Backup_shapes.swift +++ b/Sources/Soto/Services/Backup/Backup_shapes.swift @@ -111,6 +111,20 @@ extension Backup { public var description: String { return self.rawValue } } + public enum Index: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case disabled = "DISABLED" + case enabled = "ENABLED" + public var description: String { return self.rawValue } + } + + public enum IndexStatus: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case active = "ACTIVE" + case deleting = "DELETING" + case failed = "FAILED" + case pending = "PENDING" + public var description: String { return 
self.rawValue } + } + public enum LegalHoldStatus: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case active = "ACTIVE" case canceled = "CANCELED" @@ -499,6 +513,8 @@ extension Backup { public let copyActions: [CopyAction]? /// Specifies whether Backup creates continuous backups. True causes Backup to create continuous backups capable of point-in-time restore (PITR). False (or not specified) causes Backup to create snapshot backups. public let enableContinuousBackup: Bool? + /// IndexActions is an array you use to specify how backup data should be indexed. Each BackupRule can have 0 or 1 IndexAction, as each backup can have up to one index associated with it. Within the array is ResourceType. Only one will be accepted for each BackupRule. + public let indexActions: [IndexAction]? /// The lifecycle defines when a protected resource is transitioned to cold storage and when it expires. Backup transitions and expires backups automatically according to the lifecycle that you define. Backups transitioned to cold storage must be stored in cold storage for a minimum of 90 days. Therefore, the “retention” setting must be 90 days greater than the “transition to cold after days” setting. The “transition to cold after days” setting cannot be changed after a backup has been transitioned to cold. Resource types that can transition to cold storage are listed in the Feature availability by resource table. Backup ignores this expression for other resource types. public let lifecycle: Lifecycle? /// The tags that are assigned to resources that are associated with this rule when restored from backup. @@ -517,10 +533,11 @@ extension Backup { public let targetBackupVaultName: String @inlinable - public init(completionWindowMinutes: Int64? = nil, copyActions: [CopyAction]? = nil, enableContinuousBackup: Bool? = nil, lifecycle: Lifecycle? = nil, recoveryPointTags: [String: String]? = nil, ruleId: String? = nil, ruleName: String, scheduleExpression: String?
= nil, scheduleExpressionTimezone: String? = nil, startWindowMinutes: Int64? = nil, targetBackupVaultName: String) { + public init(completionWindowMinutes: Int64? = nil, copyActions: [CopyAction]? = nil, enableContinuousBackup: Bool? = nil, indexActions: [IndexAction]? = nil, lifecycle: Lifecycle? = nil, recoveryPointTags: [String: String]? = nil, ruleId: String? = nil, ruleName: String, scheduleExpression: String? = nil, scheduleExpressionTimezone: String? = nil, startWindowMinutes: Int64? = nil, targetBackupVaultName: String) { self.completionWindowMinutes = completionWindowMinutes self.copyActions = copyActions self.enableContinuousBackup = enableContinuousBackup + self.indexActions = indexActions self.lifecycle = lifecycle self.recoveryPointTags = recoveryPointTags self.ruleId = ruleId @@ -535,6 +552,7 @@ extension Backup { case completionWindowMinutes = "CompletionWindowMinutes" case copyActions = "CopyActions" case enableContinuousBackup = "EnableContinuousBackup" + case indexActions = "IndexActions" case lifecycle = "Lifecycle" case recoveryPointTags = "RecoveryPointTags" case ruleId = "RuleId" @@ -553,6 +571,8 @@ extension Backup { public let copyActions: [CopyAction]? /// Specifies whether Backup creates continuous backups. True causes Backup to create continuous backups capable of point-in-time restore (PITR). False (or not specified) causes Backup to create snapshot backups. public let enableContinuousBackup: Bool? + /// There can be up to one IndexAction in each BackupRule, as each backup can have 0 or 1 backup index associated with it. Within the array is ResourceTypes. Only 1 resource type will be accepted for each BackupRule. Valid values: EBS for Amazon Elastic Block Store S3 for Amazon Simple Storage Service (Amazon S3) + public let indexActions: [IndexAction]? /// The lifecycle defines when a protected resource is transitioned to cold storage and when it expires.
Backup will transition and expire backups automatically according to the lifecycle that you define. Backups transitioned to cold storage must be stored in cold storage for a minimum of 90 days. Therefore, the “retention” setting must be 90 days greater than the “transition to cold after days” setting. The “transition to cold after days” setting cannot be changed after a backup has been transitioned to cold storage. Resource types that can transition to cold storage are listed in the Feature availability by resource table. Backup ignores this expression for other resource types. This parameter has a maximum value of 100 years (36,500 days). public let lifecycle: Lifecycle? /// The tags to assign to the resources. @@ -569,10 +589,11 @@ extension Backup { public let targetBackupVaultName: String @inlinable - public init(completionWindowMinutes: Int64? = nil, copyActions: [CopyAction]? = nil, enableContinuousBackup: Bool? = nil, lifecycle: Lifecycle? = nil, recoveryPointTags: [String: String]? = nil, ruleName: String, scheduleExpression: String? = nil, scheduleExpressionTimezone: String? = nil, startWindowMinutes: Int64? = nil, targetBackupVaultName: String) { + public init(completionWindowMinutes: Int64? = nil, copyActions: [CopyAction]? = nil, enableContinuousBackup: Bool? = nil, indexActions: [IndexAction]? = nil, lifecycle: Lifecycle? = nil, recoveryPointTags: [String: String]? = nil, ruleName: String, scheduleExpression: String? = nil, scheduleExpressionTimezone: String? = nil, startWindowMinutes: Int64? 
= nil, targetBackupVaultName: String) { self.completionWindowMinutes = completionWindowMinutes self.copyActions = copyActions self.enableContinuousBackup = enableContinuousBackup + self.indexActions = indexActions self.lifecycle = lifecycle self.recoveryPointTags = recoveryPointTags self.ruleName = ruleName @@ -583,6 +604,9 @@ extension Backup { } public func validate(name: String) throws { + try self.indexActions?.forEach { + try $0.validate(name: "\(name).indexActions[]") + } try self.validate(self.ruleName, name: "ruleName", parent: name, pattern: "^[a-zA-Z0-9\\-\\_\\.]{1,50}$") try self.validate(self.targetBackupVaultName, name: "targetBackupVaultName", parent: name, pattern: "^[a-zA-Z0-9\\-\\_]{2,50}$") } @@ -591,6 +615,7 @@ extension Backup { case completionWindowMinutes = "CompletionWindowMinutes" case copyActions = "CopyActions" case enableContinuousBackup = "EnableContinuousBackup" + case indexActions = "IndexActions" case lifecycle = "Lifecycle" case recoveryPointTags = "RecoveryPointTags" case ruleName = "RuleName" @@ -2310,6 +2335,10 @@ extension Backup { public let encryptionKeyArn: String? /// Specifies the IAM role ARN used to create the target recovery point; for example, arn:aws:iam::123456789012:role/S3Access. public let iamRoleArn: String? + /// This is the current status for the backup index associated with the specified recovery point. Statuses are: PENDING | ACTIVE | FAILED | DELETING A recovery point with an index that has the status of ACTIVE can be included in a search. + public let indexStatus: IndexStatus? + /// A string in the form of a detailed message explaining the status of a backup index associated with the recovery point. + public let indexStatusMessage: String? /// A Boolean value that is returned as TRUE if the specified recovery point is encrypted, or FALSE if the recovery point is not encrypted. public let isEncrypted: Bool? /// This returns the boolean value that a recovery point is a parent (composite) job. 
@@ -2340,7 +2369,7 @@ extension Backup { public let vaultType: VaultType? @inlinable - public init(backupSizeInBytes: Int64? = nil, backupVaultArn: String? = nil, backupVaultName: String? = nil, calculatedLifecycle: CalculatedLifecycle? = nil, completionDate: Date? = nil, compositeMemberIdentifier: String? = nil, createdBy: RecoveryPointCreator? = nil, creationDate: Date? = nil, encryptionKeyArn: String? = nil, iamRoleArn: String? = nil, isEncrypted: Bool? = nil, isParent: Bool? = nil, lastRestoreTime: Date? = nil, lifecycle: Lifecycle? = nil, parentRecoveryPointArn: String? = nil, recoveryPointArn: String? = nil, resourceArn: String? = nil, resourceName: String? = nil, resourceType: String? = nil, sourceBackupVaultArn: String? = nil, status: RecoveryPointStatus? = nil, statusMessage: String? = nil, storageClass: StorageClass? = nil, vaultType: VaultType? = nil) { + public init(backupSizeInBytes: Int64? = nil, backupVaultArn: String? = nil, backupVaultName: String? = nil, calculatedLifecycle: CalculatedLifecycle? = nil, completionDate: Date? = nil, compositeMemberIdentifier: String? = nil, createdBy: RecoveryPointCreator? = nil, creationDate: Date? = nil, encryptionKeyArn: String? = nil, iamRoleArn: String? = nil, indexStatus: IndexStatus? = nil, indexStatusMessage: String? = nil, isEncrypted: Bool? = nil, isParent: Bool? = nil, lastRestoreTime: Date? = nil, lifecycle: Lifecycle? = nil, parentRecoveryPointArn: String? = nil, recoveryPointArn: String? = nil, resourceArn: String? = nil, resourceName: String? = nil, resourceType: String? = nil, sourceBackupVaultArn: String? = nil, status: RecoveryPointStatus? = nil, statusMessage: String? = nil, storageClass: StorageClass? = nil, vaultType: VaultType? 
= nil) { self.backupSizeInBytes = backupSizeInBytes self.backupVaultArn = backupVaultArn self.backupVaultName = backupVaultName @@ -2351,6 +2380,8 @@ extension Backup { self.creationDate = creationDate self.encryptionKeyArn = encryptionKeyArn self.iamRoleArn = iamRoleArn + self.indexStatus = indexStatus + self.indexStatusMessage = indexStatusMessage self.isEncrypted = isEncrypted self.isParent = isParent self.lastRestoreTime = lastRestoreTime @@ -2378,6 +2409,8 @@ extension Backup { case creationDate = "CreationDate" case encryptionKeyArn = "EncryptionKeyArn" case iamRoleArn = "IamRoleArn" + case indexStatus = "IndexStatus" + case indexStatusMessage = "IndexStatusMessage" case isEncrypted = "IsEncrypted" case isParent = "IsParent" case lastRestoreTime = "LastRestoreTime" @@ -3075,6 +3108,78 @@ extension Backup { } } + public struct GetRecoveryPointIndexDetailsInput: AWSEncodableShape { + /// The name of a logical container where backups are stored. Backup vaults are identified by names that are unique to the account used to create them and the Region where they are created. Accepted characters include lowercase letters, numbers, and hyphens. + public let backupVaultName: String + /// An ARN that uniquely identifies a recovery point; for example, arn:aws:backup:us-east-1:123456789012:recovery-point:1EB3B5E7-9EB0-435A-A80B-108B488B0D45. + public let recoveryPointArn: String + + @inlinable + public init(backupVaultName: String, recoveryPointArn: String) { + self.backupVaultName = backupVaultName + self.recoveryPointArn = recoveryPointArn + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! 
RequestEncodingContainer + _ = encoder.container(keyedBy: CodingKeys.self) + request.encodePath(self.backupVaultName, key: "BackupVaultName") + request.encodePath(self.recoveryPointArn, key: "RecoveryPointArn") + } + + public func validate(name: String) throws { + try self.validate(self.backupVaultName, name: "backupVaultName", parent: name, pattern: "^[a-zA-Z0-9\\-\\_]{2,50}$") + } + + private enum CodingKeys: CodingKey {} + } + + public struct GetRecoveryPointIndexDetailsOutput: AWSDecodableShape { + /// An ARN that uniquely identifies the backup vault where the recovery point index is stored. For example, arn:aws:backup:us-east-1:123456789012:backup-vault:aBackupVault. + public let backupVaultArn: String? + /// The date and time that a backup index finished creation, in Unix format and Coordinated Universal Time (UTC). The value of CreationDate is accurate to milliseconds. For example, the value 1516925490.087 represents Friday, January 26, 2018 12:11:30.087 AM. + public let indexCompletionDate: Date? + /// The date and time that a backup index was created, in Unix format and Coordinated Universal Time (UTC). The value of CreationDate is accurate to milliseconds. For example, the value 1516925490.087 represents Friday, January 26, 2018 12:11:30.087 AM. + public let indexCreationDate: Date? + /// The date and time that a backup index was deleted, in Unix format and Coordinated Universal Time (UTC). The value of CreationDate is accurate to milliseconds. For example, the value 1516925490.087 represents Friday, January 26, 2018 12:11:30.087 AM. + public let indexDeletionDate: Date? + /// This is the current status for the backup index associated with the specified recovery point. Statuses are: PENDING | ACTIVE | FAILED | DELETING A recovery point with an index that has the status of ACTIVE can be included in a search. + public let indexStatus: IndexStatus? + /// A detailed message explaining the status of a backup index associated with the recovery point. 
+ public let indexStatusMessage: String? + /// An ARN that uniquely identifies a recovery point; for example, arn:aws:backup:us-east-1:123456789012:recovery-point:1EB3B5E7-9EB0-435A-A80B-108B488B0D45. + public let recoveryPointArn: String? + /// A string of the Amazon Resource Name (ARN) that uniquely identifies the source resource. + public let sourceResourceArn: String? + /// Count of items within the backup index associated with the recovery point. + public let totalItemsIndexed: Int64? + + @inlinable + public init(backupVaultArn: String? = nil, indexCompletionDate: Date? = nil, indexCreationDate: Date? = nil, indexDeletionDate: Date? = nil, indexStatus: IndexStatus? = nil, indexStatusMessage: String? = nil, recoveryPointArn: String? = nil, sourceResourceArn: String? = nil, totalItemsIndexed: Int64? = nil) { + self.backupVaultArn = backupVaultArn + self.indexCompletionDate = indexCompletionDate + self.indexCreationDate = indexCreationDate + self.indexDeletionDate = indexDeletionDate + self.indexStatus = indexStatus + self.indexStatusMessage = indexStatusMessage + self.recoveryPointArn = recoveryPointArn + self.sourceResourceArn = sourceResourceArn + self.totalItemsIndexed = totalItemsIndexed + } + + private enum CodingKeys: String, CodingKey { + case backupVaultArn = "BackupVaultArn" + case indexCompletionDate = "IndexCompletionDate" + case indexCreationDate = "IndexCreationDate" + case indexDeletionDate = "IndexDeletionDate" + case indexStatus = "IndexStatus" + case indexStatusMessage = "IndexStatusMessage" + case recoveryPointArn = "RecoveryPointArn" + case sourceResourceArn = "SourceResourceArn" + case totalItemsIndexed = "TotalItemsIndexed" + } + } + public struct GetRecoveryPointRestoreMetadataInput: AWSEncodableShape { /// The account ID of the specified backup vault. public let backupVaultAccountId: String? 
@@ -3290,6 +3395,72 @@ extension Backup { } } + public struct IndexAction: AWSEncodableShape & AWSDecodableShape { + /// 0 or 1 index action will be accepted for each BackupRule. Valid values: EBS for Amazon Elastic Block Store S3 for Amazon Simple Storage Service (Amazon S3) + public let resourceTypes: [String]? + + @inlinable + public init(resourceTypes: [String]? = nil) { + self.resourceTypes = resourceTypes + } + + public func validate(name: String) throws { + try self.resourceTypes?.forEach { + try validate($0, name: "resourceTypes[]", parent: name, pattern: "^[a-zA-Z0-9\\-\\_\\.]{1,50}$") + } + } + + private enum CodingKeys: String, CodingKey { + case resourceTypes = "ResourceTypes" + } + } + + public struct IndexedRecoveryPoint: AWSDecodableShape { + /// The date and time that a backup was created, in Unix format and Coordinated Universal Time (UTC). The value of CreationDate is accurate to milliseconds. For example, the value 1516925490.087 represents Friday, January 26, 2018 12:11:30.087 AM. + public let backupCreationDate: Date? + /// An ARN that uniquely identifies the backup vault where the recovery point index is stored. For example, arn:aws:backup:us-east-1:123456789012:backup-vault:aBackupVault. + public let backupVaultArn: String? + /// This specifies the IAM role ARN used for this operation. For example, arn:aws:iam::123456789012:role/S3Access + public let iamRoleArn: String? + /// The date and time that a backup index was created, in Unix format and Coordinated Universal Time (UTC). The value of CreationDate is accurate to milliseconds. For example, the value 1516925490.087 represents Friday, January 26, 2018 12:11:30.087 AM. + public let indexCreationDate: Date? + /// This is the current status for the backup index associated with the specified recovery point. Statuses are: PENDING | ACTIVE | FAILED | DELETING A recovery point with an index that has the status of ACTIVE can be included in a search. + public let indexStatus: IndexStatus? 
+ /// A string in the form of a detailed message explaining the status of a backup index associated with the recovery point. + public let indexStatusMessage: String? + /// An ARN that uniquely identifies a recovery point; for example, arn:aws:backup:us-east-1:123456789012:recovery-point:1EB3B5E7-9EB0-435A-A80B-108B488B0D45 + public let recoveryPointArn: String? + /// The resource type of the indexed recovery point. EBS for Amazon Elastic Block Store S3 for Amazon Simple Storage Service (Amazon S3) + public let resourceType: String? + /// A string of the Amazon Resource Name (ARN) that uniquely identifies the source resource. + public let sourceResourceArn: String? + + @inlinable + public init(backupCreationDate: Date? = nil, backupVaultArn: String? = nil, iamRoleArn: String? = nil, indexCreationDate: Date? = nil, indexStatus: IndexStatus? = nil, indexStatusMessage: String? = nil, recoveryPointArn: String? = nil, resourceType: String? = nil, sourceResourceArn: String? = nil) { + self.backupCreationDate = backupCreationDate + self.backupVaultArn = backupVaultArn + self.iamRoleArn = iamRoleArn + self.indexCreationDate = indexCreationDate + self.indexStatus = indexStatus + self.indexStatusMessage = indexStatusMessage + self.recoveryPointArn = recoveryPointArn + self.resourceType = resourceType + self.sourceResourceArn = sourceResourceArn + } + + private enum CodingKeys: String, CodingKey { + case backupCreationDate = "BackupCreationDate" + case backupVaultArn = "BackupVaultArn" + case iamRoleArn = "IamRoleArn" + case indexCreationDate = "IndexCreationDate" + case indexStatus = "IndexStatus" + case indexStatusMessage = "IndexStatusMessage" + case recoveryPointArn = "RecoveryPointArn" + case resourceType = "ResourceType" + case sourceResourceArn = "SourceResourceArn" + } + } + public struct KeyValue: AWSEncodableShape & AWSDecodableShape { /// The tag key (String). The key can't start with aws:. Length Constraints: Minimum length of 1. Maximum length of 128. 
Pattern: ^(?![aA]{1}[wW]{1}[sS]{1}:)([\p{L}\p{Z}\p{N}_.:/=+\-@]+)$ public let key: String @@ -3983,6 +4154,72 @@ extension Backup { } } + public struct ListIndexedRecoveryPointsInput: AWSEncodableShape { + /// Returns only indexed recovery points that were created after the specified date. + public let createdAfter: Date? + /// Returns only indexed recovery points that were created before the specified date. + public let createdBefore: Date? + /// Include this parameter to filter the returned list by the indicated statuses. Accepted values: PENDING | ACTIVE | FAILED | DELETING A recovery point with an index that has the status of ACTIVE can be included in a search. + public let indexStatus: IndexStatus? + /// The maximum number of resource list items to be returned. + public let maxResults: Int? + /// The next item following a partial list of returned recovery points. For example, if a request is made to return MaxResults number of indexed recovery points, NextToken allows you to return more items in your list starting at the location pointed to by the next token. + public let nextToken: String? + /// Returns a list of indexed recovery points for the specified resource type(s). Accepted values include: EBS for Amazon Elastic Block Store S3 for Amazon Simple Storage Service (Amazon S3) + public let resourceType: String? + /// A string of the Amazon Resource Name (ARN) that uniquely identifies the source resource. + public let sourceResourceArn: String? + + @inlinable + public init(createdAfter: Date? = nil, createdBefore: Date? = nil, indexStatus: IndexStatus? = nil, maxResults: Int? = nil, nextToken: String? = nil, resourceType: String? = nil, sourceResourceArn: String? 
= nil) { + self.createdAfter = createdAfter + self.createdBefore = createdBefore + self.indexStatus = indexStatus + self.maxResults = maxResults + self.nextToken = nextToken + self.resourceType = resourceType + self.sourceResourceArn = sourceResourceArn + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer + _ = encoder.container(keyedBy: CodingKeys.self) + request.encodeQuery(self.createdAfter, key: "createdAfter") + request.encodeQuery(self.createdBefore, key: "createdBefore") + request.encodeQuery(self.indexStatus, key: "indexStatus") + request.encodeQuery(self.maxResults, key: "maxResults") + request.encodeQuery(self.nextToken, key: "nextToken") + request.encodeQuery(self.resourceType, key: "resourceType") + request.encodeQuery(self.sourceResourceArn, key: "sourceResourceArn") + } + + public func validate(name: String) throws { + try self.validate(self.maxResults, name: "maxResults", parent: name, max: 1000) + try self.validate(self.maxResults, name: "maxResults", parent: name, min: 1) + try self.validate(self.resourceType, name: "resourceType", parent: name, pattern: "^[a-zA-Z0-9\\-\\_\\.]{1,50}$") + } + + private enum CodingKeys: CodingKey {} + } + + public struct ListIndexedRecoveryPointsOutput: AWSDecodableShape { + /// This is a list of recovery points that have an associated index, belonging to the specified account. + public let indexedRecoveryPoints: [IndexedRecoveryPoint]? + /// The next item following a partial list of returned recovery points. For example, if a request is made to return MaxResults number of indexed recovery points, NextToken allows you to return more items in your list starting at the location pointed to by the next token. + public let nextToken: String? + + @inlinable + public init(indexedRecoveryPoints: [IndexedRecoveryPoint]? = nil, nextToken: String? 
= nil) { + self.indexedRecoveryPoints = indexedRecoveryPoints + self.nextToken = nextToken + } + + private enum CodingKeys: String, CodingKey { + case indexedRecoveryPoints = "IndexedRecoveryPoints" + case nextToken = "NextToken" + } + } + public struct ListLegalHoldsInput: AWSEncodableShape { /// The maximum number of resource list items to be returned. public let maxResults: Int? @@ -4970,6 +5207,10 @@ extension Backup { public let encryptionKeyArn: String? /// Specifies the IAM role ARN used to create the target recovery point; for example, arn:aws:iam::123456789012:role/S3Access. public let iamRoleArn: String? + /// This is the current status for the backup index associated with the specified recovery point. Statuses are: PENDING | ACTIVE | FAILED | DELETING A recovery point with an index that has the status of ACTIVE can be included in a search. + public let indexStatus: IndexStatus? + /// A string in the form of a detailed message explaining the status of a backup index associated with the recovery point. + public let indexStatusMessage: String? /// A Boolean value that is returned as TRUE if the specified recovery point is encrypted, or FALSE if the recovery point is not encrypted. public let isEncrypted: Bool? /// This is a boolean value indicating this is a parent (composite) recovery point. @@ -4998,7 +5239,7 @@ extension Backup { public let vaultType: VaultType? @inlinable - public init(backupSizeInBytes: Int64? = nil, backupVaultArn: String? = nil, backupVaultName: String? = nil, calculatedLifecycle: CalculatedLifecycle? = nil, completionDate: Date? = nil, compositeMemberIdentifier: String? = nil, createdBy: RecoveryPointCreator? = nil, creationDate: Date? = nil, encryptionKeyArn: String? = nil, iamRoleArn: String? = nil, isEncrypted: Bool? = nil, isParent: Bool? = nil, lastRestoreTime: Date? = nil, lifecycle: Lifecycle? = nil, parentRecoveryPointArn: String? = nil, recoveryPointArn: String? = nil, resourceArn: String? = nil, resourceName: String? 
= nil, resourceType: String? = nil, sourceBackupVaultArn: String? = nil, status: RecoveryPointStatus? = nil, statusMessage: String? = nil, vaultType: VaultType? = nil) { + public init(backupSizeInBytes: Int64? = nil, backupVaultArn: String? = nil, backupVaultName: String? = nil, calculatedLifecycle: CalculatedLifecycle? = nil, completionDate: Date? = nil, compositeMemberIdentifier: String? = nil, createdBy: RecoveryPointCreator? = nil, creationDate: Date? = nil, encryptionKeyArn: String? = nil, iamRoleArn: String? = nil, indexStatus: IndexStatus? = nil, indexStatusMessage: String? = nil, isEncrypted: Bool? = nil, isParent: Bool? = nil, lastRestoreTime: Date? = nil, lifecycle: Lifecycle? = nil, parentRecoveryPointArn: String? = nil, recoveryPointArn: String? = nil, resourceArn: String? = nil, resourceName: String? = nil, resourceType: String? = nil, sourceBackupVaultArn: String? = nil, status: RecoveryPointStatus? = nil, statusMessage: String? = nil, vaultType: VaultType? = nil) { self.backupSizeInBytes = backupSizeInBytes self.backupVaultArn = backupVaultArn self.backupVaultName = backupVaultName @@ -5009,6 +5250,8 @@ extension Backup { self.creationDate = creationDate self.encryptionKeyArn = encryptionKeyArn self.iamRoleArn = iamRoleArn + self.indexStatus = indexStatus + self.indexStatusMessage = indexStatusMessage self.isEncrypted = isEncrypted self.isParent = isParent self.lastRestoreTime = lastRestoreTime @@ -5035,6 +5278,8 @@ extension Backup { case creationDate = "CreationDate" case encryptionKeyArn = "EncryptionKeyArn" case iamRoleArn = "IamRoleArn" + case indexStatus = "IndexStatus" + case indexStatusMessage = "IndexStatusMessage" case isEncrypted = "IsEncrypted" case isParent = "IsParent" case lastRestoreTime = "LastRestoreTime" @@ -5060,6 +5305,10 @@ extension Backup { public let creationDate: Date? 
/// The server-side encryption key that is used to protect your backups; for example, arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab. public let encryptionKeyArn: String? + /// This is the current status for the backup index associated with the specified recovery point. Statuses are: PENDING | ACTIVE | FAILED | DELETING A recovery point with an index that has the status of ACTIVE can be included in a search. + public let indexStatus: IndexStatus? + /// A string in the form of a detailed message explaining the status of a backup index associated with the recovery point. + public let indexStatusMessage: String? /// This is a boolean value indicating this is a parent (composite) recovery point. public let isParent: Bool? /// The Amazon Resource Name (ARN) of the parent (composite) recovery point. @@ -5076,11 +5325,13 @@ extension Backup { public let vaultType: VaultType? @inlinable - public init(backupSizeBytes: Int64? = nil, backupVaultName: String? = nil, creationDate: Date? = nil, encryptionKeyArn: String? = nil, isParent: Bool? = nil, parentRecoveryPointArn: String? = nil, recoveryPointArn: String? = nil, resourceName: String? = nil, status: RecoveryPointStatus? = nil, statusMessage: String? = nil, vaultType: VaultType? = nil) { + public init(backupSizeBytes: Int64? = nil, backupVaultName: String? = nil, creationDate: Date? = nil, encryptionKeyArn: String? = nil, indexStatus: IndexStatus? = nil, indexStatusMessage: String? = nil, isParent: Bool? = nil, parentRecoveryPointArn: String? = nil, recoveryPointArn: String? = nil, resourceName: String? = nil, status: RecoveryPointStatus? = nil, statusMessage: String? = nil, vaultType: VaultType? 
= nil) { self.backupSizeBytes = backupSizeBytes self.backupVaultName = backupVaultName self.creationDate = creationDate self.encryptionKeyArn = encryptionKeyArn + self.indexStatus = indexStatus + self.indexStatusMessage = indexStatusMessage self.isParent = isParent self.parentRecoveryPointArn = parentRecoveryPointArn self.recoveryPointArn = recoveryPointArn @@ -5095,6 +5346,8 @@ extension Backup { case backupVaultName = "BackupVaultName" case creationDate = "CreationDate" case encryptionKeyArn = "EncryptionKeyArn" + case indexStatus = "IndexStatus" + case indexStatusMessage = "IndexStatusMessage" case isParent = "IsParent" case parentRecoveryPointArn = "ParentRecoveryPointArn" case recoveryPointArn = "RecoveryPointArn" @@ -5819,6 +6072,8 @@ extension Backup { public let iamRoleArn: String /// A customer-chosen string that you can use to distinguish between otherwise identical calls to StartBackupJob. Retrying a successful request with the same idempotency token results in a success message with no action taken. public let idempotencyToken: String? + /// Include this parameter to enable index creation if your backup job has a resource type that supports backup indexes. Resource types that support backup indexes include: EBS for Amazon Elastic Block Store S3 for Amazon Simple Storage Service (Amazon S3) Index can have 1 of 2 possible values, either ENABLED or DISABLED. To create a backup index for an eligible ACTIVE recovery point that does not yet have a backup index, set value to ENABLED. To delete a backup index, set value to DISABLED. + public let index: Index? /// The lifecycle defines when a protected resource is transitioned to cold storage and when it expires. Backup will transition and expire backups automatically according to the lifecycle that you define. Backups transitioned to cold storage must be stored in cold storage for a minimum of 90 days. Therefore, the “retention” setting must be 90 days greater than the “transition to cold after days” setting. 
The “transition to cold after days” setting cannot be changed after a backup has been transitioned to cold. Resource types that can transition to cold storage are listed in the Feature availability by resource table. Backup ignores this expression for other resource types. This parameter has a maximum value of 100 years (36,500 days). public let lifecycle: Lifecycle? /// The tags to assign to the resources. @@ -5829,12 +6084,13 @@ extension Backup { public let startWindowMinutes: Int64? @inlinable - public init(backupOptions: [String: String]? = nil, backupVaultName: String, completeWindowMinutes: Int64? = nil, iamRoleArn: String, idempotencyToken: String? = nil, lifecycle: Lifecycle? = nil, recoveryPointTags: [String: String]? = nil, resourceArn: String, startWindowMinutes: Int64? = nil) { + public init(backupOptions: [String: String]? = nil, backupVaultName: String, completeWindowMinutes: Int64? = nil, iamRoleArn: String, idempotencyToken: String? = nil, index: Index? = nil, lifecycle: Lifecycle? = nil, recoveryPointTags: [String: String]? = nil, resourceArn: String, startWindowMinutes: Int64? = nil) { self.backupOptions = backupOptions self.backupVaultName = backupVaultName self.completeWindowMinutes = completeWindowMinutes self.iamRoleArn = iamRoleArn self.idempotencyToken = idempotencyToken + self.index = index self.lifecycle = lifecycle self.recoveryPointTags = recoveryPointTags self.resourceArn = resourceArn @@ -5855,6 +6111,7 @@ extension Backup { case completeWindowMinutes = "CompleteWindowMinutes" case iamRoleArn = "IamRoleArn" case idempotencyToken = "IdempotencyToken" + case index = "Index" case lifecycle = "Lifecycle" case recoveryPointTags = "RecoveryPointTags" case resourceArn = "ResourceArn" @@ -6248,6 +6505,69 @@ extension Backup { } } + public struct UpdateRecoveryPointIndexSettingsInput: AWSEncodableShape { + /// The name of a logical container where backups are stored. 
Backup vaults are identified by names that are unique to the account used to create them and the Region where they are created. Accepted characters include lowercase letters, numbers, and hyphens. + public let backupVaultName: String + /// This specifies the IAM role ARN used for this operation. For example, arn:aws:iam::123456789012:role/S3Access + public let iamRoleArn: String? + /// Index can have 1 of 2 possible values, either ENABLED or DISABLED. To create a backup index for an eligible ACTIVE recovery point that does not yet have a backup index, set value to ENABLED. To delete a backup index, set value to DISABLED. + public let index: Index + /// An ARN that uniquely identifies a recovery point; for example, arn:aws:backup:us-east-1:123456789012:recovery-point:1EB3B5E7-9EB0-435A-A80B-108B488B0D45. + public let recoveryPointArn: String + + @inlinable + public init(backupVaultName: String, iamRoleArn: String? = nil, index: Index, recoveryPointArn: String) { + self.backupVaultName = backupVaultName + self.iamRoleArn = iamRoleArn + self.index = index + self.recoveryPointArn = recoveryPointArn + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer + var container = encoder.container(keyedBy: CodingKeys.self) + request.encodePath(self.backupVaultName, key: "BackupVaultName") + try container.encodeIfPresent(self.iamRoleArn, forKey: .iamRoleArn) + try container.encode(self.index, forKey: .index) + request.encodePath(self.recoveryPointArn, key: "RecoveryPointArn") + } + + public func validate(name: String) throws { + try self.validate(self.backupVaultName, name: "backupVaultName", parent: name, pattern: "^[a-zA-Z0-9\\-\\_]{2,50}$") + } + + private enum CodingKeys: String, CodingKey { + case iamRoleArn = "IamRoleArn" + case index = "Index" + } + } + + public struct UpdateRecoveryPointIndexSettingsOutput: AWSDecodableShape { + /// The name of a logical container where backups are stored. 
Backup vaults are identified by names that are unique to the account used to create them and the Region where they are created. + public let backupVaultName: String? + /// Index can have 1 of 2 possible values, either ENABLED or DISABLED. A value of ENABLED means a backup index for an eligible ACTIVE recovery point has been created. A value of DISABLED means a backup index was deleted. + public let index: Index? + /// This is the current status for the backup index associated with the specified recovery point. Statuses are: PENDING | ACTIVE | FAILED | DELETING A recovery point with an index that has the status of ACTIVE can be included in a search. + public let indexStatus: IndexStatus? + /// An ARN that uniquely identifies a recovery point; for example, arn:aws:backup:us-east-1:123456789012:recovery-point:1EB3B5E7-9EB0-435A-A80B-108B488B0D45. + public let recoveryPointArn: String? + + @inlinable + public init(backupVaultName: String? = nil, index: Index? = nil, indexStatus: IndexStatus? = nil, recoveryPointArn: String? = nil) { + self.backupVaultName = backupVaultName + self.index = index + self.indexStatus = indexStatus + self.recoveryPointArn = recoveryPointArn + } + + private enum CodingKeys: String, CodingKey { + case backupVaultName = "BackupVaultName" + case index = "Index" + case indexStatus = "IndexStatus" + case recoveryPointArn = "RecoveryPointArn" + } + } + public struct UpdateRecoveryPointLifecycleInput: AWSEncodableShape { /// The name of a logical container where backups are stored. Backup vaults are identified by names that are unique to the account used to create them and the Amazon Web Services Region where they are created. 
public let backupVaultName: String diff --git a/Sources/Soto/Services/BackupSearch/BackupSearch_api.swift b/Sources/Soto/Services/BackupSearch/BackupSearch_api.swift new file mode 100644 index 0000000000..6d14d58dce --- /dev/null +++ b/Sources/Soto/Services/BackupSearch/BackupSearch_api.swift @@ -0,0 +1,551 @@ +//===----------------------------------------------------------------------===// +// +// This source file is part of the Soto for AWS open source project +// +// Copyright (c) 2017-2024 the Soto project authors +// Licensed under Apache License v2.0 +// +// See LICENSE.txt for license information +// See CONTRIBUTORS.txt for the list of Soto project authors +// +// SPDX-License-Identifier: Apache-2.0 +// +//===----------------------------------------------------------------------===// + +// THIS FILE IS AUTOMATICALLY GENERATED by https://github.com/soto-project/soto-codegenerator. +// DO NOT EDIT. + +#if os(Linux) && compiler(<5.10) +// swift-corelibs-foundation hasn't been updated with Sendable conformances +@preconcurrency import Foundation +#else +import Foundation +#endif +@_exported import SotoCore + +/// Service object for interacting with AWS BackupSearch service. +/// +/// Backup Search Backup Search is the recovery point and item level search for Backup. For additional information, see: Backup API Reference Backup Developer Guide +public struct BackupSearch: AWSService { + // MARK: Member variables + + /// Client used for communication with AWS + public let client: AWSClient + /// Service configuration + public let config: AWSServiceConfig + + // MARK: Initialization + + /// Initialize the BackupSearch client + /// - parameters: + /// - client: AWSClient used to process requests + /// - region: Region of server you want to communicate with. This will override the partition parameter. + /// - partition: AWS partition where service resides, standard (.aws), china (.awscn), government (.awsusgov). 
+ /// - endpoint: Custom endpoint URL to use instead of standard AWS servers + /// - middleware: Middleware chain used to edit requests before they are sent and responses before they are decoded + /// - timeout: Timeout value for HTTP requests + /// - byteBufferAllocator: Allocator for ByteBuffers + /// - options: Service options + public init( + client: AWSClient, + region: SotoCore.Region? = nil, + partition: AWSPartition = .aws, + endpoint: String? = nil, + middleware: AWSMiddlewareProtocol? = nil, + timeout: TimeAmount? = nil, + byteBufferAllocator: ByteBufferAllocator = ByteBufferAllocator(), + options: AWSServiceConfig.Options = [] + ) { + self.client = client + self.config = AWSServiceConfig( + region: region, + partition: region?.partition ?? partition, + serviceName: "BackupSearch", + serviceIdentifier: "backup-search", + serviceProtocol: .restjson, + apiVersion: "2018-05-10", + endpoint: endpoint, + errorType: BackupSearchErrorType.self, + middleware: middleware, + timeout: timeout, + byteBufferAllocator: byteBufferAllocator, + options: options + ) + } + + + + + + // MARK: API Calls + + /// This operation retrieves metadata of a search job, including its progress. + @Sendable + @inlinable + public func getSearchJob(_ input: GetSearchJobInput, logger: Logger = AWSClient.loggingDisabled) async throws -> GetSearchJobOutput { + try await self.client.execute( + operation: "GetSearchJob", + path: "/search-jobs/{SearchJobIdentifier}", + httpMethod: .GET, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// This operation retrieves metadata of a search job, including its progress. + /// + /// Parameters: + /// - searchJobIdentifier: Required unique string that specifies the search job. 
+ /// - logger: Logger use during operation + @inlinable + public func getSearchJob( + searchJobIdentifier: String, + logger: Logger = AWSClient.loggingDisabled + ) async throws -> GetSearchJobOutput { + let input = GetSearchJobInput( + searchJobIdentifier: searchJobIdentifier + ) + return try await self.getSearchJob(input, logger: logger) + } + + /// This operation retrieves the metadata of an export job. An export job is an operation that transmits the results of a search job to a specified S3 bucket in a .csv file. An export job allows you to retain results of a search beyond the search job's scheduled retention of 7 days. + @Sendable + @inlinable + public func getSearchResultExportJob(_ input: GetSearchResultExportJobInput, logger: Logger = AWSClient.loggingDisabled) async throws -> GetSearchResultExportJobOutput { + try await self.client.execute( + operation: "GetSearchResultExportJob", + path: "/export-search-jobs/{ExportJobIdentifier}", + httpMethod: .GET, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// This operation retrieves the metadata of an export job. An export job is an operation that transmits the results of a search job to a specified S3 bucket in a .csv file. An export job allows you to retain results of a search beyond the search job's scheduled retention of 7 days. + /// + /// Parameters: + /// - exportJobIdentifier: This is the unique string that identifies a specific export job. Required for this operation. 
+ /// - logger: Logger use during operation + @inlinable + public func getSearchResultExportJob( + exportJobIdentifier: String, + logger: Logger = AWSClient.loggingDisabled + ) async throws -> GetSearchResultExportJobOutput { + let input = GetSearchResultExportJobInput( + exportJobIdentifier: exportJobIdentifier + ) + return try await self.getSearchResultExportJob(input, logger: logger) + } + + /// This operation returns a list of all backups (recovery points) in a paginated format that were included in the search job. If a search does not display an expected backup in the results, you can call this operation to display each backup included in the search. Any backups that were not included because they have a FAILED status from a permissions issue will be displayed, along with a status message. Only recovery points with a backup index that has a status of ACTIVE will be included in search results. If the index has any other status, its status will be displayed along with a status message. + @Sendable + @inlinable + public func listSearchJobBackups(_ input: ListSearchJobBackupsInput, logger: Logger = AWSClient.loggingDisabled) async throws -> ListSearchJobBackupsOutput { + try await self.client.execute( + operation: "ListSearchJobBackups", + path: "/search-jobs/{SearchJobIdentifier}/backups", + httpMethod: .GET, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// This operation returns a list of all backups (recovery points) in a paginated format that were included in the search job. If a search does not display an expected backup in the results, you can call this operation to display each backup included in the search. Any backups that were not included because they have a FAILED status from a permissions issue will be displayed, along with a status message. Only recovery points with a backup index that has a status of ACTIVE will be included in search results. 
If the index has any other status, its status will be displayed along with a status message. + /// + /// Parameters: + /// - maxResults: The maximum number of resource list items to be returned. + /// - nextToken: The next item following a partial list of returned backups included in a search job. For example, if a request is made to return MaxResults number of backups, NextToken allows you to return more items in your list starting at the location pointed to by the next token. + /// - searchJobIdentifier: The unique string that specifies the search job. + /// - logger: Logger use during operation + @inlinable + public func listSearchJobBackups( + maxResults: Int? = nil, + nextToken: String? = nil, + searchJobIdentifier: String, + logger: Logger = AWSClient.loggingDisabled + ) async throws -> ListSearchJobBackupsOutput { + let input = ListSearchJobBackupsInput( + maxResults: maxResults, + nextToken: nextToken, + searchJobIdentifier: searchJobIdentifier + ) + return try await self.listSearchJobBackups(input, logger: logger) + } + + /// This operation returns a list of a specified search job. + @Sendable + @inlinable + public func listSearchJobResults(_ input: ListSearchJobResultsInput, logger: Logger = AWSClient.loggingDisabled) async throws -> ListSearchJobResultsOutput { + try await self.client.execute( + operation: "ListSearchJobResults", + path: "/search-jobs/{SearchJobIdentifier}/search-results", + httpMethod: .GET, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// This operation returns a list of a specified search job. + /// + /// Parameters: + /// - maxResults: The maximum number of resource list items to be returned. + /// - nextToken: The next item following a partial list of returned search job results. For example, if a request is made to return MaxResults number of search job results, NextToken allows you to return more items in your list starting at the location pointed to by the next token. 
+ /// - searchJobIdentifier: The unique string that specifies the search job. + /// - logger: Logger use during operation + @inlinable + public func listSearchJobResults( + maxResults: Int? = nil, + nextToken: String? = nil, + searchJobIdentifier: String, + logger: Logger = AWSClient.loggingDisabled + ) async throws -> ListSearchJobResultsOutput { + let input = ListSearchJobResultsInput( + maxResults: maxResults, + nextToken: nextToken, + searchJobIdentifier: searchJobIdentifier + ) + return try await self.listSearchJobResults(input, logger: logger) + } + + /// This operation returns a list of search jobs belonging to an account. + @Sendable + @inlinable + public func listSearchJobs(_ input: ListSearchJobsInput, logger: Logger = AWSClient.loggingDisabled) async throws -> ListSearchJobsOutput { + try await self.client.execute( + operation: "ListSearchJobs", + path: "/search-jobs", + httpMethod: .GET, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// This operation returns a list of search jobs belonging to an account. + /// + /// Parameters: + /// - byStatus: Include this parameter to filter list by search job status. + /// - maxResults: The maximum number of resource list items to be returned. + /// - nextToken: The next item following a partial list of returned search jobs. For example, if a request is made to return MaxResults number of backups, NextToken allows you to return more items in your list starting at the location pointed to by the next token. + /// - logger: Logger use during operation + @inlinable + public func listSearchJobs( + byStatus: SearchJobState? = nil, + maxResults: Int? = nil, + nextToken: String? 
= nil, + logger: Logger = AWSClient.loggingDisabled + ) async throws -> ListSearchJobsOutput { + let input = ListSearchJobsInput( + byStatus: byStatus, + maxResults: maxResults, + nextToken: nextToken + ) + return try await self.listSearchJobs(input, logger: logger) + } + + /// This operation exports search results of a search job to a specified destination S3 bucket. + @Sendable + @inlinable + public func listSearchResultExportJobs(_ input: ListSearchResultExportJobsInput, logger: Logger = AWSClient.loggingDisabled) async throws -> ListSearchResultExportJobsOutput { + try await self.client.execute( + operation: "ListSearchResultExportJobs", + path: "/export-search-jobs", + httpMethod: .GET, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// This operation exports search results of a search job to a specified destination S3 bucket. + /// + /// Parameters: + /// - maxResults: The maximum number of resource list items to be returned. + /// - nextToken: The next item following a partial list of returned backups included in a search job. For example, if a request is made to return MaxResults number of backups, NextToken allows you to return more items in your list starting at the location pointed to by the next token. + /// - searchJobIdentifier: The unique string that specifies the search job. + /// - status: The search jobs to be included in the export job can be filtered by including this parameter. + /// - logger: Logger use during operation + @inlinable + public func listSearchResultExportJobs( + maxResults: Int? = nil, + nextToken: String? = nil, + searchJobIdentifier: String? = nil, + status: ExportJobStatus? 
= nil, + logger: Logger = AWSClient.loggingDisabled + ) async throws -> ListSearchResultExportJobsOutput { + let input = ListSearchResultExportJobsInput( + maxResults: maxResults, + nextToken: nextToken, + searchJobIdentifier: searchJobIdentifier, + status: status + ) + return try await self.listSearchResultExportJobs(input, logger: logger) + } + + /// This operation returns the tags for a resource type. + @Sendable + @inlinable + public func listTagsForResource(_ input: ListTagsForResourceRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListTagsForResourceResponse { + try await self.client.execute( + operation: "ListTagsForResource", + path: "/tags/{ResourceArn}", + httpMethod: .GET, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// This operation returns the tags for a resource type. + /// + /// Parameters: + /// - resourceArn: The Amazon Resource Name (ARN) that uniquely identifies the resource.> + /// - logger: Logger use during operation + @inlinable + public func listTagsForResource( + resourceArn: String, + logger: Logger = AWSClient.loggingDisabled + ) async throws -> ListTagsForResourceResponse { + let input = ListTagsForResourceRequest( + resourceArn: resourceArn + ) + return try await self.listTagsForResource(input, logger: logger) + } + + /// This operation creates a search job which returns recovery points filtered by SearchScope and items filtered by ItemFilters. You can optionally include ClientToken, EncryptionKeyArn, Name, and/or Tags. 
+ @Sendable + @inlinable + public func startSearchJob(_ input: StartSearchJobInput, logger: Logger = AWSClient.loggingDisabled) async throws -> StartSearchJobOutput { + try await self.client.execute( + operation: "StartSearchJob", + path: "/search-jobs", + httpMethod: .PUT, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// This operation creates a search job which returns recovery points filtered by SearchScope and items filtered by ItemFilters. You can optionally include ClientToken, EncryptionKeyArn, Name, and/or Tags. + /// + /// Parameters: + /// - clientToken: Include this parameter to allow multiple identical calls for idempotency. A client token is valid for 8 hours after the first request that uses it is completed. After this time, any request with the same token is treated as a new request. + /// - encryptionKeyArn: The encryption key for the specified search job. + /// - itemFilters: Item Filters represent all input item properties specified when the search was created. Contains either EBSItemFilters or S3ItemFilters + /// - name: Include alphanumeric characters to create a name for this search job. + /// - searchScope: This object can contain BackupResourceTypes, BackupResourceArns, BackupResourceCreationTime, BackupResourceTags, and SourceResourceArns to filter the recovery points returned by the search job. + /// - tags: List of tags returned by the operation. + /// - logger: Logger use during operation + @inlinable + public func startSearchJob( + clientToken: String? = nil, + encryptionKeyArn: String? = nil, + itemFilters: ItemFilters? = nil, + name: String? = nil, + searchScope: SearchScope, + tags: [String: String]? 
= nil, + logger: Logger = AWSClient.loggingDisabled + ) async throws -> StartSearchJobOutput { + let input = StartSearchJobInput( + clientToken: clientToken, + encryptionKeyArn: encryptionKeyArn, + itemFilters: itemFilters, + name: name, + searchScope: searchScope, + tags: tags + ) + return try await self.startSearchJob(input, logger: logger) + } + + /// This operations starts a job to export the results of search job to a designated S3 bucket. + @Sendable + @inlinable + public func startSearchResultExportJob(_ input: StartSearchResultExportJobInput, logger: Logger = AWSClient.loggingDisabled) async throws -> StartSearchResultExportJobOutput { + try await self.client.execute( + operation: "StartSearchResultExportJob", + path: "/export-search-jobs", + httpMethod: .PUT, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// This operations starts a job to export the results of search job to a designated S3 bucket. + /// + /// Parameters: + /// - clientToken: Include this parameter to allow multiple identical calls for idempotency. A client token is valid for 8 hours after the first request that uses it is completed. After this time, any request with the same token is treated as a new request. + /// - exportSpecification: This specification contains a required string of the destination bucket; optionally, you can include the destination prefix. + /// - roleArn: This parameter specifies the role ARN used to start the search results export jobs. + /// - searchJobIdentifier: The unique string that specifies the search job. + /// - tags: Optional tags to include. A tag is a key-value pair you can use to manage, filter, and search for your resources. Allowed characters include UTF-8 letters, numbers, spaces, and the following characters: + - = . _ : /. + /// - logger: Logger use during operation + @inlinable + public func startSearchResultExportJob( + clientToken: String? = nil, + exportSpecification: ExportSpecification, + roleArn: String? 
= nil, + searchJobIdentifier: String, + tags: [String: String]? = nil, + logger: Logger = AWSClient.loggingDisabled + ) async throws -> StartSearchResultExportJobOutput { + let input = StartSearchResultExportJobInput( + clientToken: clientToken, + exportSpecification: exportSpecification, + roleArn: roleArn, + searchJobIdentifier: searchJobIdentifier, + tags: tags + ) + return try await self.startSearchResultExportJob(input, logger: logger) + } + + /// This operations ends a search job. Only a search job with a status of RUNNING can be stopped. + @Sendable + @inlinable + public func stopSearchJob(_ input: StopSearchJobInput, logger: Logger = AWSClient.loggingDisabled) async throws -> StopSearchJobOutput { + try await self.client.execute( + operation: "StopSearchJob", + path: "/search-jobs/{SearchJobIdentifier}/actions/cancel", + httpMethod: .PUT, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// This operations ends a search job. Only a search job with a status of RUNNING can be stopped. + /// + /// Parameters: + /// - searchJobIdentifier: The unique string that specifies the search job. + /// - logger: Logger use during operation + @inlinable + public func stopSearchJob( + searchJobIdentifier: String, + logger: Logger = AWSClient.loggingDisabled + ) async throws -> StopSearchJobOutput { + let input = StopSearchJobInput( + searchJobIdentifier: searchJobIdentifier + ) + return try await self.stopSearchJob(input, logger: logger) + } + + /// This operation puts tags on the resource you indicate. + @Sendable + @inlinable + public func tagResource(_ input: TagResourceRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> TagResourceResponse { + try await self.client.execute( + operation: "TagResource", + path: "/tags/{ResourceArn}", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// This operation puts tags on the resource you indicate. 
+ /// + /// Parameters: + /// - resourceArn: The Amazon Resource Name (ARN) that uniquely identifies the resource. This is the resource that will have the indicated tags. + /// - tags: Required tags to include. A tag is a key-value pair you can use to manage, filter, and search for your resources. Allowed characters include UTF-8 letters, numbers, spaces, and the following characters: + - = . _ : /. + /// - logger: Logger use during operation + @inlinable + public func tagResource( + resourceArn: String, + tags: [String: String], + logger: Logger = AWSClient.loggingDisabled + ) async throws -> TagResourceResponse { + let input = TagResourceRequest( + resourceArn: resourceArn, + tags: tags + ) + return try await self.tagResource(input, logger: logger) + } + + /// This operation removes tags from the specified resource. + @Sendable + @inlinable + public func untagResource(_ input: UntagResourceRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> UntagResourceResponse { + try await self.client.execute( + operation: "UntagResource", + path: "/tags/{ResourceArn}", + httpMethod: .DELETE, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// This operation removes tags from the specified resource. + /// + /// Parameters: + /// - resourceArn: The Amazon Resource Name (ARN) that uniquely identifies the resource where you want to remove tags. + /// - tagKeys: This required parameter contains the tag keys you want to remove from the source. + /// - logger: Logger use during operation + @inlinable + public func untagResource( + resourceArn: String, + tagKeys: [String], + logger: Logger = AWSClient.loggingDisabled + ) async throws -> UntagResourceResponse { + let input = UntagResourceRequest( + resourceArn: resourceArn, + tagKeys: tagKeys + ) + return try await self.untagResource(input, logger: logger) + } +} + +extension BackupSearch { + /// Initializer required by `AWSService.with(middlewares:timeout:byteBufferAllocator:options)`. 
You are not able to use this initializer directly as there are not public + /// initializers for `AWSServiceConfig.Patch`. Please use `AWSService.with(middlewares:timeout:byteBufferAllocator:options)` instead. + public init(from: BackupSearch, patch: AWSServiceConfig.Patch) { + self.client = from.client + self.config = from.config.with(patch: patch) + } +} + +// MARK: Paginators + +@available(macOS 10.15, iOS 13.0, tvOS 13.0, watchOS 6.0, *) +extension BackupSearch { + /// Return PaginatorSequence for operation ``listSearchJobBackups(_:logger:)``. + /// + /// - Parameters: + /// - input: Input for operation + /// - logger: Logger used for logging + @inlinable + public func listSearchJobBackupsPaginator( + _ input: ListSearchJobBackupsInput, + logger: Logger = AWSClient.loggingDisabled + ) -> AWSClient.PaginatorSequence { + return .init( + input: input, + command: self.listSearchJobBackups, + inputKey: \ListSearchJobBackupsInput.nextToken, + outputKey: \ListSearchJobBackupsOutput.nextToken, + logger: logger + ) + } + /// Return PaginatorSequence for operation ``listSearchJobBackups(_:logger:)``. + /// + /// - Parameters: + /// - maxResults: The maximum number of resource list items to be returned. + /// - searchJobIdentifier: The unique string that specifies the search job. + /// - logger: Logger used for logging + @inlinable + public func listSearchJobBackupsPaginator( + maxResults: Int? 
= nil, + searchJobIdentifier: String, + logger: Logger = AWSClient.loggingDisabled + ) -> AWSClient.PaginatorSequence { + let input = ListSearchJobBackupsInput( + maxResults: maxResults, + searchJobIdentifier: searchJobIdentifier + ) + return self.listSearchJobBackupsPaginator(input, logger: logger) + } +} + +extension BackupSearch.ListSearchJobBackupsInput: AWSPaginateToken { + @inlinable + public func usingPaginationToken(_ token: String) -> BackupSearch.ListSearchJobBackupsInput { + return .init( + maxResults: self.maxResults, + nextToken: token, + searchJobIdentifier: self.searchJobIdentifier + ) + } +} diff --git a/Sources/Soto/Services/BackupSearch/BackupSearch_shapes.swift b/Sources/Soto/Services/BackupSearch/BackupSearch_shapes.swift new file mode 100644 index 0000000000..33b6ce1042 --- /dev/null +++ b/Sources/Soto/Services/BackupSearch/BackupSearch_shapes.swift @@ -0,0 +1,1202 @@ +//===----------------------------------------------------------------------===// +// +// This source file is part of the Soto for AWS open source project +// +// Copyright (c) 2017-2024 the Soto project authors +// Licensed under Apache License v2.0 +// +// See LICENSE.txt for license information +// See CONTRIBUTORS.txt for the list of Soto project authors +// +// SPDX-License-Identifier: Apache-2.0 +// +//===----------------------------------------------------------------------===// + +// THIS FILE IS AUTOMATICALLY GENERATED by https://github.com/soto-project/soto-codegenerator. +// DO NOT EDIT. 
+ +#if os(Linux) && compiler(<5.10) +// swift-corelibs-foundation hasn't been updated with Sendable conformances +@preconcurrency import Foundation +#else +import Foundation +#endif +@_spi(SotoInternal) import SotoCore + +extension BackupSearch { + // MARK: Enums + + public enum ExportJobStatus: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case completed = "COMPLETED" + case failed = "FAILED" + case running = "RUNNING" + public var description: String { return self.rawValue } + } + + public enum LongConditionOperator: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case equalsTo = "EQUALS_TO" + case greaterThanEqualTo = "GREATER_THAN_EQUAL_TO" + case lessThanEqualTo = "LESS_THAN_EQUAL_TO" + case notEqualsTo = "NOT_EQUALS_TO" + public var description: String { return self.rawValue } + } + + public enum ResourceType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case ebs = "EBS" + case s3 = "S3" + public var description: String { return self.rawValue } + } + + public enum SearchJobState: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case completed = "COMPLETED" + case failed = "FAILED" + case running = "RUNNING" + case stopped = "STOPPED" + case stopping = "STOPPING" + public var description: String { return self.rawValue } + } + + public enum StringConditionOperator: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case beginsWith = "BEGINS_WITH" + case contains = "CONTAINS" + case doesNotBeginWith = "DOES_NOT_BEGIN_WITH" + case doesNotContain = "DOES_NOT_CONTAIN" + case doesNotEndWith = "DOES_NOT_END_WITH" + case endsWith = "ENDS_WITH" + case equalsTo = "EQUALS_TO" + case notEqualsTo = "NOT_EQUALS_TO" + public var description: String { return self.rawValue } + } + + public enum TimeConditionOperator: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case equalsTo = "EQUALS_TO" + 
case greaterThanEqualTo = "GREATER_THAN_EQUAL_TO" + case lessThanEqualTo = "LESS_THAN_EQUAL_TO" + case notEqualsTo = "NOT_EQUALS_TO" + public var description: String { return self.rawValue } + } + + public enum ResultItem: AWSDecodableShape, Sendable { + /// These are items returned in the search results of an Amazon EBS search. + case ebsResultItem(EBSResultItem) + /// These are items returned in the search results of an Amazon S3 search. + case s3ResultItem(S3ResultItem) + + public init(from decoder: Decoder) throws { + let container = try decoder.container(keyedBy: CodingKeys.self) + guard container.allKeys.count == 1, let key = container.allKeys.first else { + let context = DecodingError.Context( + codingPath: container.codingPath, + debugDescription: "Expected exactly one key, but got \(container.allKeys.count)" + ) + throw DecodingError.dataCorrupted(context) + } + switch key { + case .ebsResultItem: + let value = try container.decode(EBSResultItem.self, forKey: .ebsResultItem) + self = .ebsResultItem(value) + case .s3ResultItem: + let value = try container.decode(S3ResultItem.self, forKey: .s3ResultItem) + self = .s3ResultItem(value) + } + } + + private enum CodingKeys: String, CodingKey { + case ebsResultItem = "EBSResultItem" + case s3ResultItem = "S3ResultItem" + } + } + + // MARK: Shapes + + public struct BackupCreationTimeFilter: AWSEncodableShape & AWSDecodableShape { + /// This timestamp includes recovery points only created after the specified time. + public let createdAfter: Date? + /// This timestamp includes recovery points only created before the specified time. + public let createdBefore: Date? + + @inlinable + public init(createdAfter: Date? = nil, createdBefore: Date? 
= nil) { + self.createdAfter = createdAfter + self.createdBefore = createdBefore + } + + private enum CodingKeys: String, CodingKey { + case createdAfter = "CreatedAfter" + case createdBefore = "CreatedBefore" + } + } + + public struct CurrentSearchProgress: AWSDecodableShape { + /// This number is the sum of all items that match the item filters in a search job in progress. + public let itemsMatchedCount: Int64? + /// This number is the sum of all items that have been scanned so far during a search job. + public let itemsScannedCount: Int64? + /// This number is the sum of all backups that have been scanned so far during a search job. + public let recoveryPointsScannedCount: Int? + + @inlinable + public init(itemsMatchedCount: Int64? = nil, itemsScannedCount: Int64? = nil, recoveryPointsScannedCount: Int? = nil) { + self.itemsMatchedCount = itemsMatchedCount + self.itemsScannedCount = itemsScannedCount + self.recoveryPointsScannedCount = recoveryPointsScannedCount + } + + private enum CodingKeys: String, CodingKey { + case itemsMatchedCount = "ItemsMatchedCount" + case itemsScannedCount = "ItemsScannedCount" + case recoveryPointsScannedCount = "RecoveryPointsScannedCount" + } + } + + public struct EBSItemFilter: AWSEncodableShape & AWSDecodableShape { + /// You can include 1 to 10 values. If one is included, the results will return only items that match. If more than one is included, the results will return all items that match any of the included values. + public let creationTimes: [TimeCondition]? + /// You can include 1 to 10 values. If one file path is included, the results will return only items that match the file path. If more than one file path is included, the results will return all items that match any of the file paths. + public let filePaths: [StringCondition]? + /// You can include 1 to 10 values. If one is included, the results will return only items that match. 
If more than one is included, the results will return all items that match any of the included values. + public let lastModificationTimes: [TimeCondition]? + /// You can include 1 to 10 values. If one is included, the results will return only items that match. If more than one is included, the results will return all items that match any of the included values. + public let sizes: [LongCondition]? + + @inlinable + public init(creationTimes: [TimeCondition]? = nil, filePaths: [StringCondition]? = nil, lastModificationTimes: [TimeCondition]? = nil, sizes: [LongCondition]? = nil) { + self.creationTimes = creationTimes + self.filePaths = filePaths + self.lastModificationTimes = lastModificationTimes + self.sizes = sizes + } + + public func validate(name: String) throws { + try self.validate(self.creationTimes, name: "creationTimes", parent: name, max: 10) + try self.validate(self.creationTimes, name: "creationTimes", parent: name, min: 1) + try self.validate(self.filePaths, name: "filePaths", parent: name, max: 10) + try self.validate(self.filePaths, name: "filePaths", parent: name, min: 1) + try self.validate(self.lastModificationTimes, name: "lastModificationTimes", parent: name, max: 10) + try self.validate(self.lastModificationTimes, name: "lastModificationTimes", parent: name, min: 1) + try self.validate(self.sizes, name: "sizes", parent: name, max: 10) + try self.validate(self.sizes, name: "sizes", parent: name, min: 1) + } + + private enum CodingKeys: String, CodingKey { + case creationTimes = "CreationTimes" + case filePaths = "FilePaths" + case lastModificationTimes = "LastModificationTimes" + case sizes = "Sizes" + } + } + + public struct EBSResultItem: AWSDecodableShape { + /// These are one or more items in the results that match values for the Amazon Resource Name (ARN) of recovery points returned in a search of Amazon EBS backup metadata. + public let backupResourceArn: String? + /// The name of the backup vault. + public let backupVaultName: String? 
+ /// These are one or more items in the results that match values for creation times returned in a search of Amazon EBS backup metadata. + public let creationTime: Date? + /// These are one or more items in the results that match values for file paths returned in a search of Amazon EBS backup metadata. + public let filePath: String? + /// These are one or more items in the results that match values for file sizes returned in a search of Amazon EBS backup metadata. + public let fileSize: Int64? + /// These are one or more items in the results that match values for file systems returned in a search of Amazon EBS backup metadata. + public let fileSystemIdentifier: String? + /// These are one or more items in the results that match values for Last Modified Time returned in a search of Amazon EBS backup metadata. + public let lastModifiedTime: Date? + /// These are one or more items in the results that match values for the Amazon Resource Name (ARN) of source resources returned in a search of Amazon EBS backup metadata. + public let sourceResourceArn: String? + + @inlinable + public init(backupResourceArn: String? = nil, backupVaultName: String? = nil, creationTime: Date? = nil, filePath: String? = nil, fileSize: Int64? = nil, fileSystemIdentifier: String? = nil, lastModifiedTime: Date? = nil, sourceResourceArn: String? 
= nil) { + self.backupResourceArn = backupResourceArn + self.backupVaultName = backupVaultName + self.creationTime = creationTime + self.filePath = filePath + self.fileSize = fileSize + self.fileSystemIdentifier = fileSystemIdentifier + self.lastModifiedTime = lastModifiedTime + self.sourceResourceArn = sourceResourceArn + } + + private enum CodingKeys: String, CodingKey { + case backupResourceArn = "BackupResourceArn" + case backupVaultName = "BackupVaultName" + case creationTime = "CreationTime" + case filePath = "FilePath" + case fileSize = "FileSize" + case fileSystemIdentifier = "FileSystemIdentifier" + case lastModifiedTime = "LastModifiedTime" + case sourceResourceArn = "SourceResourceArn" + } + } + + public struct ExportJobSummary: AWSDecodableShape { + /// This is a timestamp of the time the export job compeleted. + public let completionTime: Date? + /// This is a timestamp of the time the export job was created. + public let creationTime: Date? + /// This is the unique ARN (Amazon Resource Name) that belongs to the new export job. + public let exportJobArn: String? + /// This is the unique string that identifies a specific export job. + public let exportJobIdentifier: String + /// The unique string that identifies the Amazon Resource Name (ARN) of the specified search job. + public let searchJobArn: String? + /// The status of the export job is one of the following: CREATED; RUNNING; FAILED; or COMPLETED. + public let status: ExportJobStatus? + /// A status message is a string that is returned for an export job. A status message is included for any status other than COMPLETED without issues. + public let statusMessage: String? + + @inlinable + public init(completionTime: Date? = nil, creationTime: Date? = nil, exportJobArn: String? = nil, exportJobIdentifier: String, searchJobArn: String? = nil, status: ExportJobStatus? = nil, statusMessage: String? 
= nil) { + self.completionTime = completionTime + self.creationTime = creationTime + self.exportJobArn = exportJobArn + self.exportJobIdentifier = exportJobIdentifier + self.searchJobArn = searchJobArn + self.status = status + self.statusMessage = statusMessage + } + + private enum CodingKeys: String, CodingKey { + case completionTime = "CompletionTime" + case creationTime = "CreationTime" + case exportJobArn = "ExportJobArn" + case exportJobIdentifier = "ExportJobIdentifier" + case searchJobArn = "SearchJobArn" + case status = "Status" + case statusMessage = "StatusMessage" + } + } + + public struct GetSearchJobInput: AWSEncodableShape { + /// Required unique string that specifies the search job. + public let searchJobIdentifier: String + + @inlinable + public init(searchJobIdentifier: String) { + self.searchJobIdentifier = searchJobIdentifier + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer + _ = encoder.container(keyedBy: CodingKeys.self) + request.encodePath(self.searchJobIdentifier, key: "SearchJobIdentifier") + } + + private enum CodingKeys: CodingKey {} + } + + public struct GetSearchJobOutput: AWSDecodableShape { + /// The date and time that a search job completed, in Unix format and Coordinated Universal Time (UTC). The value of CompletionTime is accurate to milliseconds. For example, the value 1516925490.087 represents Friday, January 26, 2018 12:11:30.087 AM. + public let completionTime: Date? + /// The date and time that a search job was created, in Unix format and Coordinated Universal Time (UTC). The value of CompletionTime is accurate to milliseconds. For example, the value 1516925490.087 represents Friday, January 26, 2018 12:11:30.087 AM. + public let creationTime: Date + /// Returns numbers representing BackupsScannedCount, ItemsScanned, and ItemsMatched. + public let currentSearchProgress: CurrentSearchProgress? 
+ /// The encryption key for the specified search job. Example: arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab. + public let encryptionKeyArn: String? + /// Item Filters represent all input item properties specified when the search was created. + public let itemFilters: ItemFilters + /// Returned name of the specified search job. + public let name: String? + /// The unique string that identifies the Amazon Resource Name (ARN) of the specified search job. + public let searchJobArn: String + /// The unique string that identifies the specified search job. + public let searchJobIdentifier: String + /// The search scope is all backup properties input into a search. + public let searchScope: SearchScope + /// Returned summary of the specified search job scope, including: TotalBackupsToScanCount, the number of recovery points returned by the search. TotalItemsToScanCount, the number of items returned by the search. + public let searchScopeSummary: SearchScopeSummary? + /// The current status of the specified search job. A search job may have one of the following statuses: RUNNING; COMPLETED; STOPPED; FAILED; TIMED_OUT; or EXPIRED . + public let status: SearchJobState + /// A status message will be returned for either a earch job with a status of ERRORED or a status of COMPLETED jobs with issues. For example, a message may say that a search contained recovery points unable to be scanned because of a permissions issue. + public let statusMessage: String? + + @inlinable + public init(completionTime: Date? = nil, creationTime: Date, currentSearchProgress: CurrentSearchProgress? = nil, encryptionKeyArn: String? = nil, itemFilters: ItemFilters, name: String? = nil, searchJobArn: String, searchJobIdentifier: String, searchScope: SearchScope, searchScopeSummary: SearchScopeSummary? = nil, status: SearchJobState, statusMessage: String? 
= nil) { + self.completionTime = completionTime + self.creationTime = creationTime + self.currentSearchProgress = currentSearchProgress + self.encryptionKeyArn = encryptionKeyArn + self.itemFilters = itemFilters + self.name = name + self.searchJobArn = searchJobArn + self.searchJobIdentifier = searchJobIdentifier + self.searchScope = searchScope + self.searchScopeSummary = searchScopeSummary + self.status = status + self.statusMessage = statusMessage + } + + private enum CodingKeys: String, CodingKey { + case completionTime = "CompletionTime" + case creationTime = "CreationTime" + case currentSearchProgress = "CurrentSearchProgress" + case encryptionKeyArn = "EncryptionKeyArn" + case itemFilters = "ItemFilters" + case name = "Name" + case searchJobArn = "SearchJobArn" + case searchJobIdentifier = "SearchJobIdentifier" + case searchScope = "SearchScope" + case searchScopeSummary = "SearchScopeSummary" + case status = "Status" + case statusMessage = "StatusMessage" + } + } + + public struct GetSearchResultExportJobInput: AWSEncodableShape { + /// This is the unique string that identifies a specific export job. Required for this operation. + public let exportJobIdentifier: String + + @inlinable + public init(exportJobIdentifier: String) { + self.exportJobIdentifier = exportJobIdentifier + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer + _ = encoder.container(keyedBy: CodingKeys.self) + request.encodePath(self.exportJobIdentifier, key: "ExportJobIdentifier") + } + + private enum CodingKeys: CodingKey {} + } + + public struct GetSearchResultExportJobOutput: AWSDecodableShape { + /// The date and time that an export job completed, in Unix format and Coordinated Universal Time (UTC). The value of CreationTime is accurate to milliseconds. For example, the value 1516925490.087 represents Friday, January 26, 2018 12:11:30.087 AM. + public let completionTime: Date? 
+ /// The date and time that an export job was created, in Unix format and Coordinated Universal Time (UTC). The value of CreationTime is accurate to milliseconds. For example, the value 1516925490.087 represents Friday, January 26, 2018 12:11:30.087 AM. + public let creationTime: Date? + /// The unique Amazon Resource Name (ARN) that uniquely identifies the export job. + public let exportJobArn: String? + /// This is the unique string that identifies the specified export job. + public let exportJobIdentifier: String + /// The export specification consists of the destination S3 bucket to which the search results were exported, along with the destination prefix. + public let exportSpecification: ExportSpecification? + /// The unique string that identifies the Amazon Resource Name (ARN) of the specified search job. + public let searchJobArn: String? + /// This is the current status of the export job. + public let status: ExportJobStatus? + /// A status message is a string that is returned for search job with a status of FAILED, along with steps to remedy and retry the operation. + public let statusMessage: String? + + @inlinable + public init(completionTime: Date? = nil, creationTime: Date? = nil, exportJobArn: String? = nil, exportJobIdentifier: String, exportSpecification: ExportSpecification? = nil, searchJobArn: String? = nil, status: ExportJobStatus? = nil, statusMessage: String? 
= nil) { + self.completionTime = completionTime + self.creationTime = creationTime + self.exportJobArn = exportJobArn + self.exportJobIdentifier = exportJobIdentifier + self.exportSpecification = exportSpecification + self.searchJobArn = searchJobArn + self.status = status + self.statusMessage = statusMessage + } + + private enum CodingKeys: String, CodingKey { + case completionTime = "CompletionTime" + case creationTime = "CreationTime" + case exportJobArn = "ExportJobArn" + case exportJobIdentifier = "ExportJobIdentifier" + case exportSpecification = "ExportSpecification" + case searchJobArn = "SearchJobArn" + case status = "Status" + case statusMessage = "StatusMessage" + } + } + + public struct ItemFilters: AWSEncodableShape & AWSDecodableShape { + /// This array can contain CreationTimes, FilePaths, LastModificationTimes, or Sizes objects. + public let ebsItemFilters: [EBSItemFilter]? + /// This array can contain CreationTimes, ETags, ObjectKeys, Sizes, or VersionIds objects. + public let s3ItemFilters: [S3ItemFilter]? + + @inlinable + public init(ebsItemFilters: [EBSItemFilter]? = nil, s3ItemFilters: [S3ItemFilter]? = nil) { + self.ebsItemFilters = ebsItemFilters + self.s3ItemFilters = s3ItemFilters + } + + public func validate(name: String) throws { + try self.ebsItemFilters?.forEach { + try $0.validate(name: "\(name).ebsItemFilters[]") + } + try self.validate(self.ebsItemFilters, name: "ebsItemFilters", parent: name, max: 10) + try self.s3ItemFilters?.forEach { + try $0.validate(name: "\(name).s3ItemFilters[]") + } + try self.validate(self.s3ItemFilters, name: "s3ItemFilters", parent: name, max: 10) + } + + private enum CodingKeys: String, CodingKey { + case ebsItemFilters = "EBSItemFilters" + case s3ItemFilters = "S3ItemFilters" + } + } + + public struct ListSearchJobBackupsInput: AWSEncodableShape { + /// The maximum number of resource list items to be returned. + public let maxResults: Int? 
+ /// The next item following a partial list of returned backups included in a search job. For example, if a request is made to return MaxResults number of backups, NextToken allows you to return more items in your list starting at the location pointed to by the next token. + public let nextToken: String? + /// The unique string that specifies the search job. + public let searchJobIdentifier: String + + @inlinable + public init(maxResults: Int? = nil, nextToken: String? = nil, searchJobIdentifier: String) { + self.maxResults = maxResults + self.nextToken = nextToken + self.searchJobIdentifier = searchJobIdentifier + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer + _ = encoder.container(keyedBy: CodingKeys.self) + request.encodeQuery(self.maxResults, key: "maxResults") + request.encodeQuery(self.nextToken, key: "nextToken") + request.encodePath(self.searchJobIdentifier, key: "SearchJobIdentifier") + } + + private enum CodingKeys: CodingKey {} + } + + public struct ListSearchJobBackupsOutput: AWSDecodableShape { + /// The next item following a partial list of returned backups included in a search job. For example, if a request is made to return MaxResults number of backups, NextToken allows you to return more items in your list starting at the location pointed to by the next token. + public let nextToken: String? + /// The recovery points returned the results of a search job + public let results: [SearchJobBackupsResult] + + @inlinable + public init(nextToken: String? = nil, results: [SearchJobBackupsResult]) { + self.nextToken = nextToken + self.results = results + } + + private enum CodingKeys: String, CodingKey { + case nextToken = "NextToken" + case results = "Results" + } + } + + public struct ListSearchJobResultsInput: AWSEncodableShape { + /// The maximum number of resource list items to be returned. + public let maxResults: Int? 
+ /// The next item following a partial list of returned search job results. For example, if a request is made to return MaxResults number of search job results, NextToken allows you to return more items in your list starting at the location pointed to by the next token. + public let nextToken: String? + /// The unique string that specifies the search job. + public let searchJobIdentifier: String + + @inlinable + public init(maxResults: Int? = nil, nextToken: String? = nil, searchJobIdentifier: String) { + self.maxResults = maxResults + self.nextToken = nextToken + self.searchJobIdentifier = searchJobIdentifier + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer + _ = encoder.container(keyedBy: CodingKeys.self) + request.encodeQuery(self.maxResults, key: "maxResults") + request.encodeQuery(self.nextToken, key: "nextToken") + request.encodePath(self.searchJobIdentifier, key: "SearchJobIdentifier") + } + + private enum CodingKeys: CodingKey {} + } + + public struct ListSearchJobResultsOutput: AWSDecodableShape { + /// The next item following a partial list of search job results. For example, if a request is made to return MaxResults number of backups, NextToken allows you to return more items in your list starting at the location pointed to by the next token. + public let nextToken: String? + /// The results consist of either EBSResultItem or S3ResultItem. + public let results: [ResultItem] + + @inlinable + public init(nextToken: String? = nil, results: [ResultItem]) { + self.nextToken = nextToken + self.results = results + } + + private enum CodingKeys: String, CodingKey { + case nextToken = "NextToken" + case results = "Results" + } + } + + public struct ListSearchJobsInput: AWSEncodableShape { + /// Include this parameter to filter list by search job status. + public let byStatus: SearchJobState? + /// The maximum number of resource list items to be returned. 
+ public let maxResults: Int? + /// The next item following a partial list of returned search jobs. For example, if a request is made to return MaxResults number of backups, NextToken allows you to return more items in your list starting at the location pointed to by the next token. + public let nextToken: String? + + @inlinable + public init(byStatus: SearchJobState? = nil, maxResults: Int? = nil, nextToken: String? = nil) { + self.byStatus = byStatus + self.maxResults = maxResults + self.nextToken = nextToken + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer + _ = encoder.container(keyedBy: CodingKeys.self) + request.encodeQuery(self.byStatus, key: "Status") + request.encodeQuery(self.maxResults, key: "MaxResults") + request.encodeQuery(self.nextToken, key: "NextToken") + } + + private enum CodingKeys: CodingKey {} + } + + public struct ListSearchJobsOutput: AWSDecodableShape { + /// The next item following a partial list of returned backups included in a search job. For example, if a request is made to return MaxResults number of backups, NextToken allows you to return more items in your list starting at the location pointed to by the next token. + public let nextToken: String? + /// The search jobs among the list, with details of the returned search jobs. + public let searchJobs: [SearchJobSummary] + + @inlinable + public init(nextToken: String? = nil, searchJobs: [SearchJobSummary]) { + self.nextToken = nextToken + self.searchJobs = searchJobs + } + + private enum CodingKeys: String, CodingKey { + case nextToken = "NextToken" + case searchJobs = "SearchJobs" + } + } + + public struct ListSearchResultExportJobsInput: AWSEncodableShape { + /// The maximum number of resource list items to be returned. + public let maxResults: Int? + /// The next item following a partial list of returned backups included in a search job. 
For example, if a request is made to return MaxResults number of backups, NextToken allows you to return more items in your list starting at the location pointed to by the next token. + public let nextToken: String? + /// The unique string that specifies the search job. + public let searchJobIdentifier: String? + /// The search jobs to be included in the export job can be filtered by including this parameter. + public let status: ExportJobStatus? + + @inlinable + public init(maxResults: Int? = nil, nextToken: String? = nil, searchJobIdentifier: String? = nil, status: ExportJobStatus? = nil) { + self.maxResults = maxResults + self.nextToken = nextToken + self.searchJobIdentifier = searchJobIdentifier + self.status = status + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer + _ = encoder.container(keyedBy: CodingKeys.self) + request.encodeQuery(self.maxResults, key: "MaxResults") + request.encodeQuery(self.nextToken, key: "NextToken") + request.encodeQuery(self.searchJobIdentifier, key: "SearchJobIdentifier") + request.encodeQuery(self.status, key: "Status") + } + + private enum CodingKeys: CodingKey {} + } + + public struct ListSearchResultExportJobsOutput: AWSDecodableShape { + /// The operation returns the included export jobs. + public let exportJobs: [ExportJobSummary] + /// The next item following a partial list of returned backups included in a search job. For example, if a request is made to return MaxResults number of backups, NextToken allows you to return more items in your list starting at the location pointed to by the next token. + public let nextToken: String? + + @inlinable + public init(exportJobs: [ExportJobSummary], nextToken: String? 
= nil) { + self.exportJobs = exportJobs + self.nextToken = nextToken + } + + private enum CodingKeys: String, CodingKey { + case exportJobs = "ExportJobs" + case nextToken = "NextToken" + } + } + + public struct ListTagsForResourceRequest: AWSEncodableShape { + /// The Amazon Resource Name (ARN) that uniquely identifies the resource.> + public let resourceArn: String + + @inlinable + public init(resourceArn: String) { + self.resourceArn = resourceArn + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer + _ = encoder.container(keyedBy: CodingKeys.self) + request.encodePath(self.resourceArn, key: "ResourceArn") + } + + private enum CodingKeys: CodingKey {} + } + + public struct ListTagsForResourceResponse: AWSDecodableShape { + /// List of tags returned by the operation. + public let tags: [String: String]? + + @inlinable + public init(tags: [String: String]? = nil) { + self.tags = tags + } + + private enum CodingKeys: String, CodingKey { + case tags = "Tags" + } + } + + public struct LongCondition: AWSEncodableShape & AWSDecodableShape { + /// A string that defines what values will be returned. If this is included, avoid combinations of operators that will return all possible values. For example, including both EQUALS_TO and NOT_EQUALS_TO with a value of 4 will return all values. + public let `operator`: LongConditionOperator? + /// The value of an item included in one of the search item filters. + public let value: Int64 + + @inlinable + public init(operator: LongConditionOperator? = nil, value: Int64) { + self.`operator` = `operator` + self.value = value + } + + private enum CodingKeys: String, CodingKey { + case `operator` = "Operator" + case value = "Value" + } + } + + public struct S3ExportSpecification: AWSEncodableShape & AWSDecodableShape { + /// This specifies the destination Amazon S3 bucket for the export job. 
+ public let destinationBucket: String + /// This specifies the prefix for the destination Amazon S3 bucket for the export job. + public let destinationPrefix: String? + + @inlinable + public init(destinationBucket: String, destinationPrefix: String? = nil) { + self.destinationBucket = destinationBucket + self.destinationPrefix = destinationPrefix + } + + private enum CodingKeys: String, CodingKey { + case destinationBucket = "DestinationBucket" + case destinationPrefix = "DestinationPrefix" + } + } + + public struct S3ItemFilter: AWSEncodableShape & AWSDecodableShape { + /// You can include 1 to 10 values. If one value is included, the results will return only items that match the value. If more than one value is included, the results will return all items that match any of the values. + public let creationTimes: [TimeCondition]? + /// You can include 1 to 10 values. If one value is included, the results will return only items that match the value. If more than one value is included, the results will return all items that match any of the values. + public let eTags: [StringCondition]? + /// You can include 1 to 10 values. If one value is included, the results will return only items that match the value. If more than one value is included, the results will return all items that match any of the values. + public let objectKeys: [StringCondition]? + /// You can include 1 to 10 values. If one value is included, the results will return only items that match the value. If more than one value is included, the results will return all items that match any of the values. + public let sizes: [LongCondition]? + /// You can include 1 to 10 values. If one value is included, the results will return only items that match the value. If more than one value is included, the results will return all items that match any of the values. + public let versionIds: [StringCondition]? + + @inlinable + public init(creationTimes: [TimeCondition]? = nil, eTags: [StringCondition]? 
= nil, objectKeys: [StringCondition]? = nil, sizes: [LongCondition]? = nil, versionIds: [StringCondition]? = nil) { + self.creationTimes = creationTimes + self.eTags = eTags + self.objectKeys = objectKeys + self.sizes = sizes + self.versionIds = versionIds + } + + public func validate(name: String) throws { + try self.validate(self.creationTimes, name: "creationTimes", parent: name, max: 10) + try self.validate(self.creationTimes, name: "creationTimes", parent: name, min: 1) + try self.validate(self.eTags, name: "eTags", parent: name, max: 10) + try self.validate(self.eTags, name: "eTags", parent: name, min: 1) + try self.validate(self.objectKeys, name: "objectKeys", parent: name, max: 10) + try self.validate(self.objectKeys, name: "objectKeys", parent: name, min: 1) + try self.validate(self.sizes, name: "sizes", parent: name, max: 10) + try self.validate(self.sizes, name: "sizes", parent: name, min: 1) + try self.validate(self.versionIds, name: "versionIds", parent: name, max: 10) + try self.validate(self.versionIds, name: "versionIds", parent: name, min: 1) + } + + private enum CodingKeys: String, CodingKey { + case creationTimes = "CreationTimes" + case eTags = "ETags" + case objectKeys = "ObjectKeys" + case sizes = "Sizes" + case versionIds = "VersionIds" + } + } + + public struct S3ResultItem: AWSDecodableShape { + /// These are items in the returned results that match recovery point Amazon Resource Names (ARN) input during a search of Amazon S3 backup metadata. + public let backupResourceArn: String? + /// The name of the backup vault. + public let backupVaultName: String? + /// These are one or more items in the returned results that match values for item creation time input during a search of Amazon S3 backup metadata. + public let creationTime: Date? + /// These are one or more items in the returned results that match values for ETags input during a search of Amazon S3 backup metadata. + public let eTag: String? 
+ /// This is one or more items returned in the results of a search of Amazon S3 backup metadata that match the values input for object key. + public let objectKey: String? + /// These are items in the returned results that match values for object size(s) input during a search of Amazon S3 backup metadata. + public let objectSize: Int64? + /// These are items in the returned results that match source Amazon Resource Names (ARN) input during a search of Amazon S3 backup metadata. + public let sourceResourceArn: String? + /// These are one or more items in the returned results that match values for version IDs input during a search of Amazon S3 backup metadata. + public let versionId: String? + + @inlinable + public init(backupResourceArn: String? = nil, backupVaultName: String? = nil, creationTime: Date? = nil, eTag: String? = nil, objectKey: String? = nil, objectSize: Int64? = nil, sourceResourceArn: String? = nil, versionId: String? = nil) { + self.backupResourceArn = backupResourceArn + self.backupVaultName = backupVaultName + self.creationTime = creationTime + self.eTag = eTag + self.objectKey = objectKey + self.objectSize = objectSize + self.sourceResourceArn = sourceResourceArn + self.versionId = versionId + } + + private enum CodingKeys: String, CodingKey { + case backupResourceArn = "BackupResourceArn" + case backupVaultName = "BackupVaultName" + case creationTime = "CreationTime" + case eTag = "ETag" + case objectKey = "ObjectKey" + case objectSize = "ObjectSize" + case sourceResourceArn = "SourceResourceArn" + case versionId = "VersionId" + } + } + + public struct SearchJobBackupsResult: AWSDecodableShape { + /// This is the creation time of the backup (recovery point). + public let backupCreationTime: Date? + /// The Amazon Resource Name (ARN) that uniquely identifies the backup resources. + public let backupResourceArn: String? + /// This is the creation time of the backup index. + public let indexCreationTime: Date? 
+ /// This is the resource type of the search. + public let resourceType: ResourceType? + /// The Amazon Resource Name (ARN) that uniquely identifies the source resources. + public let sourceResourceArn: String? + /// This is the status of the search job backup result. + public let status: SearchJobState? + /// This is the status message included with the results. + public let statusMessage: String? + + @inlinable + public init(backupCreationTime: Date? = nil, backupResourceArn: String? = nil, indexCreationTime: Date? = nil, resourceType: ResourceType? = nil, sourceResourceArn: String? = nil, status: SearchJobState? = nil, statusMessage: String? = nil) { + self.backupCreationTime = backupCreationTime + self.backupResourceArn = backupResourceArn + self.indexCreationTime = indexCreationTime + self.resourceType = resourceType + self.sourceResourceArn = sourceResourceArn + self.status = status + self.statusMessage = statusMessage + } + + private enum CodingKeys: String, CodingKey { + case backupCreationTime = "BackupCreationTime" + case backupResourceArn = "BackupResourceArn" + case indexCreationTime = "IndexCreationTime" + case resourceType = "ResourceType" + case sourceResourceArn = "SourceResourceArn" + case status = "Status" + case statusMessage = "StatusMessage" + } + } + + public struct SearchJobSummary: AWSDecodableShape { + /// This is the completion time of the search job. + public let completionTime: Date? + /// This is the creation time of the search job. + public let creationTime: Date? + /// This is the name of the search job. + public let name: String? + /// The unique string that identifies the Amazon Resource Name (ARN) of the specified search job. + public let searchJobArn: String? + /// The unique string that specifies the search job. + public let searchJobIdentifier: String? + /// Returned summary of the specified search job scope, including: TotalBackupsToScanCount, the number of recovery points returned by the search. 
TotalItemsToScanCount, the number of items returned by the search. + public let searchScopeSummary: SearchScopeSummary? + /// This is the status of the search job. + public let status: SearchJobState? + /// A status message will be returned for either a earch job with a status of ERRORED or a status of COMPLETED jobs with issues. For example, a message may say that a search contained recovery points unable to be scanned because of a permissions issue. + public let statusMessage: String? + + @inlinable + public init(completionTime: Date? = nil, creationTime: Date? = nil, name: String? = nil, searchJobArn: String? = nil, searchJobIdentifier: String? = nil, searchScopeSummary: SearchScopeSummary? = nil, status: SearchJobState? = nil, statusMessage: String? = nil) { + self.completionTime = completionTime + self.creationTime = creationTime + self.name = name + self.searchJobArn = searchJobArn + self.searchJobIdentifier = searchJobIdentifier + self.searchScopeSummary = searchScopeSummary + self.status = status + self.statusMessage = statusMessage + } + + private enum CodingKeys: String, CodingKey { + case completionTime = "CompletionTime" + case creationTime = "CreationTime" + case name = "Name" + case searchJobArn = "SearchJobArn" + case searchJobIdentifier = "SearchJobIdentifier" + case searchScopeSummary = "SearchScopeSummary" + case status = "Status" + case statusMessage = "StatusMessage" + } + } + + public struct SearchScope: AWSEncodableShape & AWSDecodableShape { + /// The Amazon Resource Name (ARN) that uniquely identifies the backup resources. + public let backupResourceArns: [String]? + /// This is the time a backup resource was created. + public let backupResourceCreationTime: BackupCreationTimeFilter? + /// These are one or more tags on the backup (recovery point). + public let backupResourceTags: [String: String]? + /// The resource types included in a search. Eligible resource types include S3 and EBS. 
+ public let backupResourceTypes: [ResourceType] + /// The Amazon Resource Name (ARN) that uniquely identifies the source resources. + public let sourceResourceArns: [String]? + + @inlinable + public init(backupResourceArns: [String]? = nil, backupResourceCreationTime: BackupCreationTimeFilter? = nil, backupResourceTags: [String: String]? = nil, backupResourceTypes: [ResourceType], sourceResourceArns: [String]? = nil) { + self.backupResourceArns = backupResourceArns + self.backupResourceCreationTime = backupResourceCreationTime + self.backupResourceTags = backupResourceTags + self.backupResourceTypes = backupResourceTypes + self.sourceResourceArns = sourceResourceArns + } + + public func validate(name: String) throws { + try self.validate(self.backupResourceArns, name: "backupResourceArns", parent: name, max: 50) + try self.validate(self.backupResourceTypes, name: "backupResourceTypes", parent: name, max: 1) + try self.validate(self.backupResourceTypes, name: "backupResourceTypes", parent: name, min: 1) + try self.validate(self.sourceResourceArns, name: "sourceResourceArns", parent: name, max: 50) + } + + private enum CodingKeys: String, CodingKey { + case backupResourceArns = "BackupResourceArns" + case backupResourceCreationTime = "BackupResourceCreationTime" + case backupResourceTags = "BackupResourceTags" + case backupResourceTypes = "BackupResourceTypes" + case sourceResourceArns = "SourceResourceArns" + } + } + + public struct SearchScopeSummary: AWSDecodableShape { + /// This is the count of the total number of items that will be scanned in a search. + public let totalItemsToScanCount: Int64? + /// This is the count of the total number of backups that will be scanned in a search. + public let totalRecoveryPointsToScanCount: Int? + + @inlinable + public init(totalItemsToScanCount: Int64? = nil, totalRecoveryPointsToScanCount: Int? 
= nil) { + self.totalItemsToScanCount = totalItemsToScanCount + self.totalRecoveryPointsToScanCount = totalRecoveryPointsToScanCount + } + + private enum CodingKeys: String, CodingKey { + case totalItemsToScanCount = "TotalItemsToScanCount" + case totalRecoveryPointsToScanCount = "TotalRecoveryPointsToScanCount" + } + } + + public struct StartSearchJobInput: AWSEncodableShape { + /// Include this parameter to allow multiple identical calls for idempotency. A client token is valid for 8 hours after the first request that uses it is completed. After this time, any request with the same token is treated as a new request. + public let clientToken: String? + /// The encryption key for the specified search job. + public let encryptionKeyArn: String? + /// Item Filters represent all input item properties specified when the search was created. Contains either EBSItemFilters or S3ItemFilters + public let itemFilters: ItemFilters? + /// Include alphanumeric characters to create a name for this search job. + public let name: String? + /// This object can contain BackupResourceTypes, BackupResourceArns, BackupResourceCreationTime, BackupResourceTags, and SourceResourceArns to filter the recovery points returned by the search job. + public let searchScope: SearchScope + /// List of tags returned by the operation. + public let tags: [String: String]? + + @inlinable + public init(clientToken: String? = nil, encryptionKeyArn: String? = nil, itemFilters: ItemFilters? = nil, name: String? = nil, searchScope: SearchScope, tags: [String: String]? 
= nil) { + self.clientToken = clientToken + self.encryptionKeyArn = encryptionKeyArn + self.itemFilters = itemFilters + self.name = name + self.searchScope = searchScope + self.tags = tags + } + + public func validate(name: String) throws { + try self.itemFilters?.validate(name: "\(name).itemFilters") + try self.searchScope.validate(name: "\(name).searchScope") + } + + private enum CodingKeys: String, CodingKey { + case clientToken = "ClientToken" + case encryptionKeyArn = "EncryptionKeyArn" + case itemFilters = "ItemFilters" + case name = "Name" + case searchScope = "SearchScope" + case tags = "Tags" + } + } + + public struct StartSearchJobOutput: AWSDecodableShape { + /// The date and time that a job was created, in Unix format and Coordinated Universal Time (UTC). The value of CompletionTime is accurate to milliseconds. For example, the value 1516925490.087 represents Friday, January 26, 2018 12:11:30.087 AM. + public let creationTime: Date? + /// The unique string that identifies the Amazon Resource Name (ARN) of the specified search job. + public let searchJobArn: String? + /// The unique string that specifies the search job. + public let searchJobIdentifier: String? + + @inlinable + public init(creationTime: Date? = nil, searchJobArn: String? = nil, searchJobIdentifier: String? = nil) { + self.creationTime = creationTime + self.searchJobArn = searchJobArn + self.searchJobIdentifier = searchJobIdentifier + } + + private enum CodingKeys: String, CodingKey { + case creationTime = "CreationTime" + case searchJobArn = "SearchJobArn" + case searchJobIdentifier = "SearchJobIdentifier" + } + } + + public struct StartSearchResultExportJobInput: AWSEncodableShape { + /// Include this parameter to allow multiple identical calls for idempotency. A client token is valid for 8 hours after the first request that uses it is completed. After this time, any request with the same token is treated as a new request. + public let clientToken: String? 
+ /// This specification contains a required string of the destination bucket; optionally, you can include the destination prefix. + public let exportSpecification: ExportSpecification + /// This parameter specifies the role ARN used to start the search results export jobs. + public let roleArn: String? + /// The unique string that specifies the search job. + public let searchJobIdentifier: String + /// Optional tags to include. A tag is a key-value pair you can use to manage, filter, and search for your resources. Allowed characters include UTF-8 letters, numbers, spaces, and the following characters: + - = . _ : /. + public let tags: [String: String]? + + @inlinable + public init(clientToken: String? = nil, exportSpecification: ExportSpecification, roleArn: String? = nil, searchJobIdentifier: String, tags: [String: String]? = nil) { + self.clientToken = clientToken + self.exportSpecification = exportSpecification + self.roleArn = roleArn + self.searchJobIdentifier = searchJobIdentifier + self.tags = tags + } + + public func validate(name: String) throws { + try self.validate(self.roleArn, name: "roleArn", parent: name, max: 2048) + try self.validate(self.roleArn, name: "roleArn", parent: name, min: 20) + try self.validate(self.roleArn, name: "roleArn", parent: name, pattern: "^arn:(?:aws|aws-cn|aws-us-gov):iam::[a-z0-9-]+:role/(.+)$") + } + + private enum CodingKeys: String, CodingKey { + case clientToken = "ClientToken" + case exportSpecification = "ExportSpecification" + case roleArn = "RoleArn" + case searchJobIdentifier = "SearchJobIdentifier" + case tags = "Tags" + } + } + + public struct StartSearchResultExportJobOutput: AWSDecodableShape { + /// This is the unique ARN (Amazon Resource Name) that belongs to the new export job. + public let exportJobArn: String? + /// This is the unique identifier that specifies the new export job. + public let exportJobIdentifier: String + + @inlinable + public init(exportJobArn: String? 
= nil, exportJobIdentifier: String) { + self.exportJobArn = exportJobArn + self.exportJobIdentifier = exportJobIdentifier + } + + private enum CodingKeys: String, CodingKey { + case exportJobArn = "ExportJobArn" + case exportJobIdentifier = "ExportJobIdentifier" + } + } + + public struct StopSearchJobInput: AWSEncodableShape { + /// The unique string that specifies the search job. + public let searchJobIdentifier: String + + @inlinable + public init(searchJobIdentifier: String) { + self.searchJobIdentifier = searchJobIdentifier + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer + _ = encoder.container(keyedBy: CodingKeys.self) + request.encodePath(self.searchJobIdentifier, key: "SearchJobIdentifier") + } + + private enum CodingKeys: CodingKey {} + } + + public struct StopSearchJobOutput: AWSDecodableShape { + public init() {} + } + + public struct StringCondition: AWSEncodableShape & AWSDecodableShape { + /// A string that defines what values will be returned. If this is included, avoid combinations of operators that will return all possible values. For example, including both EQUALS_TO and NOT_EQUALS_TO with a value of 4 will return all values. + public let `operator`: StringConditionOperator? + /// The value of the string. + public let value: String + + @inlinable + public init(operator: StringConditionOperator? = nil, value: String) { + self.`operator` = `operator` + self.value = value + } + + private enum CodingKeys: String, CodingKey { + case `operator` = "Operator" + case value = "Value" + } + } + + public struct TagResourceRequest: AWSEncodableShape { + /// The Amazon Resource Name (ARN) that uniquely identifies the resource. This is the resource that will have the indicated tags. + public let resourceArn: String + /// Required tags to include. A tag is a key-value pair you can use to manage, filter, and search for your resources. 
Allowed characters include UTF-8 letters, numbers, spaces, and the following characters: + - = . _ : /. + public let tags: [String: String] + + @inlinable + public init(resourceArn: String, tags: [String: String]) { + self.resourceArn = resourceArn + self.tags = tags + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer + var container = encoder.container(keyedBy: CodingKeys.self) + request.encodePath(self.resourceArn, key: "ResourceArn") + try container.encode(self.tags, forKey: .tags) + } + + private enum CodingKeys: String, CodingKey { + case tags = "Tags" + } + } + + public struct TagResourceResponse: AWSDecodableShape { + public init() {} + } + + public struct TimeCondition: AWSEncodableShape & AWSDecodableShape { + /// A string that defines what values will be returned. If this is included, avoid combinations of operators that will return all possible values. For example, including both EQUALS_TO and NOT_EQUALS_TO with a value of 4 will return all values. + public let `operator`: TimeConditionOperator? + /// This is the timestamp value of the time condition. + public let value: Date + + @inlinable + public init(operator: TimeConditionOperator? = nil, value: Date) { + self.`operator` = `operator` + self.value = value + } + + private enum CodingKeys: String, CodingKey { + case `operator` = "Operator" + case value = "Value" + } + } + + public struct UntagResourceRequest: AWSEncodableShape { + /// The Amazon Resource Name (ARN) that uniquely identifies the resource where you want to remove tags. + public let resourceArn: String + /// This required parameter contains the tag keys you want to remove from the source. + public let tagKeys: [String] + + @inlinable + public init(resourceArn: String, tagKeys: [String]) { + self.resourceArn = resourceArn + self.tagKeys = tagKeys + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! 
RequestEncodingContainer + _ = encoder.container(keyedBy: CodingKeys.self) + request.encodePath(self.resourceArn, key: "ResourceArn") + request.encodeQuery(self.tagKeys, key: "tagKeys") + } + + private enum CodingKeys: CodingKey {} + } + + public struct UntagResourceResponse: AWSDecodableShape { + public init() {} + } + + public struct ExportSpecification: AWSEncodableShape & AWSDecodableShape { + /// This specifies the destination Amazon S3 bucket for the export job. And, if included, it also specifies the destination prefix. + public let s3ExportSpecification: S3ExportSpecification? + + @inlinable + public init(s3ExportSpecification: S3ExportSpecification? = nil) { + self.s3ExportSpecification = s3ExportSpecification + } + + private enum CodingKeys: String, CodingKey { + case s3ExportSpecification = "s3ExportSpecification" + } + } +} + +// MARK: - Errors + +/// Error enum for BackupSearch +public struct BackupSearchErrorType: AWSErrorType { + enum Code: String { + case accessDeniedException = "AccessDeniedException" + case conflictException = "ConflictException" + case internalServerException = "InternalServerException" + case resourceNotFoundException = "ResourceNotFoundException" + case serviceQuotaExceededException = "ServiceQuotaExceededException" + case throttlingException = "ThrottlingException" + case validationException = "ValidationException" + } + + private let error: Code + public let context: AWSErrorContext? + + /// initialize BackupSearch + public init?(errorCode: String, context: AWSErrorContext) { + guard let error = Code(rawValue: errorCode) else { return nil } + self.error = error + self.context = context + } + + internal init(_ error: Code) { + self.error = error + self.context = nil + } + + /// return error code string + public var errorCode: String { self.error.rawValue } + + /// You do not have sufficient access to perform this action. 
+ public static var accessDeniedException: Self { .init(.accessDeniedException) } + /// This exception occurs when a conflict with a previous successful operation is detected. This generally occurs when the previous operation did not have time to propagate to the host serving the current request. A retry (with appropriate backoff logic) is the recommended response to this exception. + public static var conflictException: Self { .init(.conflictException) } + /// An internal server error occurred. Retry your request. + public static var internalServerException: Self { .init(.internalServerException) } + /// The resource was not found for this request. Confirm the resource information, such as the ARN or type is correct and exists, then retry the request. + public static var resourceNotFoundException: Self { .init(.resourceNotFoundException) } + /// The request was denied due to exceeding the quota limits permitted. + public static var serviceQuotaExceededException: Self { .init(.serviceQuotaExceededException) } + /// The request was denied due to request throttling. + public static var throttlingException: Self { .init(.throttlingException) } + /// The input fails to satisfy the constraints specified by a service. + public static var validationException: Self { .init(.validationException) } +} + +extension BackupSearchErrorType: Equatable { + public static func == (lhs: BackupSearchErrorType, rhs: BackupSearchErrorType) -> Bool { + lhs.error == rhs.error + } +} + +extension BackupSearchErrorType: CustomStringConvertible { + public var description: String { + return "\(self.error.rawValue): \(self.message ?? 
"")" + } +} diff --git a/Sources/Soto/Services/Batch/Batch_api.swift b/Sources/Soto/Services/Batch/Batch_api.swift index f6cb1737ee..b80e792f59 100644 --- a/Sources/Soto/Services/Batch/Batch_api.swift +++ b/Sources/Soto/Services/Batch/Batch_api.swift @@ -150,7 +150,7 @@ public struct Batch: AWSService { return try await self.cancelJob(input, logger: logger) } - /// Creates an Batch compute environment. You can create MANAGED or UNMANAGED compute environments. MANAGED compute environments can use Amazon EC2 or Fargate resources. UNMANAGED compute environments can only use EC2 resources. In a managed compute environment, Batch manages the capacity and instance types of the compute resources within the environment. This is based on the compute resource specification that you define or the launch template that you specify when you create the compute environment. Either, you can choose to use EC2 On-Demand Instances and EC2 Spot Instances. Or, you can use Fargate and Fargate Spot capacity in your managed compute environment. You can optionally set a maximum price so that Spot Instances only launch when the Spot Instance price is less than a specified percentage of the On-Demand price. Multi-node parallel jobs aren't supported on Spot Instances. In an unmanaged compute environment, you can manage your own EC2 compute resources and have flexibility with how you configure your compute resources. For example, you can use custom AMIs. However, you must verify that each of your AMIs meet the Amazon ECS container instance AMI specification. For more information, see container instance AMIs in the Amazon Elastic Container Service Developer Guide. After you created your unmanaged compute environment, you can use the DescribeComputeEnvironments operation to find the Amazon ECS cluster that's associated with it. Then, launch your container instances into that Amazon ECS cluster. 
For more information, see Launching an Amazon ECS container instance in the Amazon Elastic Container Service Developer Guide. To create a compute environment that uses EKS resources, the caller must have permissions to call eks:DescribeCluster. Batch doesn't automatically upgrade the AMIs in a compute environment after it's created. For example, it also doesn't update the AMIs in your compute environment when a newer version of the Amazon ECS optimized AMI is available. You're responsible for the management of the guest operating system. This includes any updates and security patches. You're also responsible for any additional application software or utilities that you install on the compute resources. There are two ways to use a new AMI for your Batch jobs. The original method is to complete these steps: Create a new compute environment with the new AMI. Add the compute environment to an existing job queue. Remove the earlier compute environment from your job queue. Delete the earlier compute environment. In April 2022, Batch added enhanced support for updating compute environments. For more information, see Updating compute environments. To use the enhanced updating of compute environments to update AMIs, follow these rules: Either don't set the service role (serviceRole) parameter or set it to the AWSBatchServiceRole service-linked role. Set the allocation strategy (allocationStrategy) parameter to BEST_FIT_PROGRESSIVE, SPOT_CAPACITY_OPTIMIZED, or SPOT_PRICE_CAPACITY_OPTIMIZED. Set the update to latest image version (updateToLatestImageVersion) parameter to true. The updateToLatestImageVersion parameter is used when you update a compute environment. This parameter is ignored when you create a compute environment. Don't specify an AMI ID in imageId, imageIdOverride (in ec2Configuration ), or in the launch template (launchTemplate). 
In that case, Batch selects the latest Amazon ECS optimized AMI that's supported by Batch at the time the infrastructure update is initiated. Alternatively, you can specify the AMI ID in the imageId or imageIdOverride parameters, or the launch template identified by the LaunchTemplate properties. Changing any of these properties starts an infrastructure update. If the AMI ID is specified in the launch template, it can't be replaced by specifying an AMI ID in either the imageId or imageIdOverride parameters. It can only be replaced by specifying a different launch template, or if the launch template version is set to $Default or $Latest, by setting either a new default version for the launch template (if $Default) or by adding a new version to the launch template (if $Latest). If these rules are followed, any update that starts an infrastructure update causes the AMI ID to be re-selected. If the version setting in the launch template (launchTemplate) is set to $Latest or $Default, the latest or default version of the launch template is evaluated up at the time of the infrastructure update, even if the launchTemplate wasn't updated. + /// Creates an Batch compute environment. You can create MANAGED or UNMANAGED compute environments. MANAGED compute environments can use Amazon EC2 or Fargate resources. UNMANAGED compute environments can only use EC2 resources. In a managed compute environment, Batch manages the capacity and instance types of the compute resources within the environment. This is based on the compute resource specification that you define or the launch template that you specify when you create the compute environment. Either, you can choose to use EC2 On-Demand Instances and EC2 Spot Instances. Or, you can use Fargate and Fargate Spot capacity in your managed compute environment. You can optionally set a maximum price so that Spot Instances only launch when the Spot Instance price is less than a specified percentage of the On-Demand price. 
Multi-node parallel jobs aren't supported on Spot Instances. In an unmanaged compute environment, you can manage your own EC2 compute resources and have flexibility with how you configure your compute resources. For example, you can use custom AMIs. However, you must verify that each of your AMIs meet the Amazon ECS container instance AMI specification. For more information, see container instance AMIs in the Amazon Elastic Container Service Developer Guide. After you created your unmanaged compute environment, you can use the DescribeComputeEnvironments operation to find the Amazon ECS cluster that's associated with it. Then, launch your container instances into that Amazon ECS cluster. For more information, see Launching an Amazon ECS container instance in the Amazon Elastic Container Service Developer Guide. To create a compute environment that uses EKS resources, the caller must have permissions to call eks:DescribeCluster. Batch doesn't automatically upgrade the AMIs in a compute environment after it's created. For example, it also doesn't update the AMIs in your compute environment when a newer version of the Amazon ECS optimized AMI is available. You're responsible for the management of the guest operating system. This includes any updates and security patches. You're also responsible for any additional application software or utilities that you install on the compute resources. There are two ways to use a new AMI for your Batch jobs. The original method is to complete these steps: Create a new compute environment with the new AMI. Add the compute environment to an existing job queue. Remove the earlier compute environment from your job queue. Delete the earlier compute environment. In April 2022, Batch added enhanced support for updating compute environments. For more information, see Updating compute environments. 
To use the enhanced updating of compute environments to update AMIs, follow these rules: Either don't set the service role (serviceRole) parameter or set it to the AWSBatchServiceRole service-linked role. Set the allocation strategy (allocationStrategy) parameter to BEST_FIT_PROGRESSIVE, SPOT_CAPACITY_OPTIMIZED, or SPOT_PRICE_CAPACITY_OPTIMIZED. Set the update to latest image version (updateToLatestImageVersion) parameter to true. The updateToLatestImageVersion parameter is used when you update a compute environment. This parameter is ignored when you create a compute environment. Don't specify an AMI ID in imageId, imageIdOverride (in ec2Configuration ), or in the launch template (launchTemplate). In that case, Batch selects the latest Amazon ECS optimized AMI that's supported by Batch at the time the infrastructure update is initiated. Alternatively, you can specify the AMI ID in the imageId or imageIdOverride parameters, or the launch template identified by the LaunchTemplate properties. Changing any of these properties starts an infrastructure update. If the AMI ID is specified in the launch template, it can't be replaced by specifying an AMI ID in either the imageId or imageIdOverride parameters. It can only be replaced by specifying a different launch template, or if the launch template version is set to $Default or $Latest, by setting either a new default version for the launch template (if $Default) or by adding a new version to the launch template (if $Latest). If these rules are followed, any update that starts an infrastructure update causes the AMI ID to be re-selected. If the version setting in the launch template (launchTemplate) is set to $Latest or $Default, the latest or default version of the launch template is evaluated up at the time of the infrastructure update, even if the launchTemplate wasn't updated. 
@Sendable @inlinable public func createComputeEnvironment(_ input: CreateComputeEnvironmentRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateComputeEnvironmentResponse { @@ -163,7 +163,7 @@ public struct Batch: AWSService { logger: logger ) } - /// Creates an Batch compute environment. You can create MANAGED or UNMANAGED compute environments. MANAGED compute environments can use Amazon EC2 or Fargate resources. UNMANAGED compute environments can only use EC2 resources. In a managed compute environment, Batch manages the capacity and instance types of the compute resources within the environment. This is based on the compute resource specification that you define or the launch template that you specify when you create the compute environment. Either, you can choose to use EC2 On-Demand Instances and EC2 Spot Instances. Or, you can use Fargate and Fargate Spot capacity in your managed compute environment. You can optionally set a maximum price so that Spot Instances only launch when the Spot Instance price is less than a specified percentage of the On-Demand price. Multi-node parallel jobs aren't supported on Spot Instances. In an unmanaged compute environment, you can manage your own EC2 compute resources and have flexibility with how you configure your compute resources. For example, you can use custom AMIs. However, you must verify that each of your AMIs meet the Amazon ECS container instance AMI specification. For more information, see container instance AMIs in the Amazon Elastic Container Service Developer Guide. After you created your unmanaged compute environment, you can use the DescribeComputeEnvironments operation to find the Amazon ECS cluster that's associated with it. Then, launch your container instances into that Amazon ECS cluster. For more information, see Launching an Amazon ECS container instance in the Amazon Elastic Container Service Developer Guide. 
To create a compute environment that uses EKS resources, the caller must have permissions to call eks:DescribeCluster. Batch doesn't automatically upgrade the AMIs in a compute environment after it's created. For example, it also doesn't update the AMIs in your compute environment when a newer version of the Amazon ECS optimized AMI is available. You're responsible for the management of the guest operating system. This includes any updates and security patches. You're also responsible for any additional application software or utilities that you install on the compute resources. There are two ways to use a new AMI for your Batch jobs. The original method is to complete these steps: Create a new compute environment with the new AMI. Add the compute environment to an existing job queue. Remove the earlier compute environment from your job queue. Delete the earlier compute environment. In April 2022, Batch added enhanced support for updating compute environments. For more information, see Updating compute environments. To use the enhanced updating of compute environments to update AMIs, follow these rules: Either don't set the service role (serviceRole) parameter or set it to the AWSBatchServiceRole service-linked role. Set the allocation strategy (allocationStrategy) parameter to BEST_FIT_PROGRESSIVE, SPOT_CAPACITY_OPTIMIZED, or SPOT_PRICE_CAPACITY_OPTIMIZED. Set the update to latest image version (updateToLatestImageVersion) parameter to true. The updateToLatestImageVersion parameter is used when you update a compute environment. This parameter is ignored when you create a compute environment. Don't specify an AMI ID in imageId, imageIdOverride (in ec2Configuration ), or in the launch template (launchTemplate). In that case, Batch selects the latest Amazon ECS optimized AMI that's supported by Batch at the time the infrastructure update is initiated. 
Alternatively, you can specify the AMI ID in the imageId or imageIdOverride parameters, or the launch template identified by the LaunchTemplate properties. Changing any of these properties starts an infrastructure update. If the AMI ID is specified in the launch template, it can't be replaced by specifying an AMI ID in either the imageId or imageIdOverride parameters. It can only be replaced by specifying a different launch template, or if the launch template version is set to $Default or $Latest, by setting either a new default version for the launch template (if $Default) or by adding a new version to the launch template (if $Latest). If these rules are followed, any update that starts an infrastructure update causes the AMI ID to be re-selected. If the version setting in the launch template (launchTemplate) is set to $Latest or $Default, the latest or default version of the launch template is evaluated up at the time of the infrastructure update, even if the launchTemplate wasn't updated. + /// Creates an Batch compute environment. You can create MANAGED or UNMANAGED compute environments. MANAGED compute environments can use Amazon EC2 or Fargate resources. UNMANAGED compute environments can only use EC2 resources. In a managed compute environment, Batch manages the capacity and instance types of the compute resources within the environment. This is based on the compute resource specification that you define or the launch template that you specify when you create the compute environment. Either, you can choose to use EC2 On-Demand Instances and EC2 Spot Instances. Or, you can use Fargate and Fargate Spot capacity in your managed compute environment. You can optionally set a maximum price so that Spot Instances only launch when the Spot Instance price is less than a specified percentage of the On-Demand price. Multi-node parallel jobs aren't supported on Spot Instances. 
In an unmanaged compute environment, you can manage your own EC2 compute resources and have flexibility with how you configure your compute resources. For example, you can use custom AMIs. However, you must verify that each of your AMIs meet the Amazon ECS container instance AMI specification. For more information, see container instance AMIs in the Amazon Elastic Container Service Developer Guide. After you created your unmanaged compute environment, you can use the DescribeComputeEnvironments operation to find the Amazon ECS cluster that's associated with it. Then, launch your container instances into that Amazon ECS cluster. For more information, see Launching an Amazon ECS container instance in the Amazon Elastic Container Service Developer Guide. To create a compute environment that uses EKS resources, the caller must have permissions to call eks:DescribeCluster. Batch doesn't automatically upgrade the AMIs in a compute environment after it's created. For example, it also doesn't update the AMIs in your compute environment when a newer version of the Amazon ECS optimized AMI is available. You're responsible for the management of the guest operating system. This includes any updates and security patches. You're also responsible for any additional application software or utilities that you install on the compute resources. There are two ways to use a new AMI for your Batch jobs. The original method is to complete these steps: Create a new compute environment with the new AMI. Add the compute environment to an existing job queue. Remove the earlier compute environment from your job queue. Delete the earlier compute environment. In April 2022, Batch added enhanced support for updating compute environments. For more information, see Updating compute environments. To use the enhanced updating of compute environments to update AMIs, follow these rules: Either don't set the service role (serviceRole) parameter or set it to the AWSBatchServiceRole service-linked role. 
Set the allocation strategy (allocationStrategy) parameter to BEST_FIT_PROGRESSIVE, SPOT_CAPACITY_OPTIMIZED, or SPOT_PRICE_CAPACITY_OPTIMIZED. Set the update to latest image version (updateToLatestImageVersion) parameter to true. The updateToLatestImageVersion parameter is used when you update a compute environment. This parameter is ignored when you create a compute environment. Don't specify an AMI ID in imageId, imageIdOverride (in ec2Configuration ), or in the launch template (launchTemplate). In that case, Batch selects the latest Amazon ECS optimized AMI that's supported by Batch at the time the infrastructure update is initiated. Alternatively, you can specify the AMI ID in the imageId or imageIdOverride parameters, or the launch template identified by the LaunchTemplate properties. Changing any of these properties starts an infrastructure update. If the AMI ID is specified in the launch template, it can't be replaced by specifying an AMI ID in either the imageId or imageIdOverride parameters. It can only be replaced by specifying a different launch template, or if the launch template version is set to $Default or $Latest, by setting either a new default version for the launch template (if $Default) or by adding a new version to the launch template (if $Latest). If these rules are followed, any update that starts an infrastructure update causes the AMI ID to be re-selected. If the version setting in the launch template (launchTemplate) is set to $Latest or $Default, the latest or default version of the launch template is evaluated up at the time of the infrastructure update, even if the launchTemplate wasn't updated. /// /// Parameters: /// - computeEnvironmentName: The name for your compute environment. It can be up to 128 characters long. It can contain uppercase and lowercase letters, numbers, hyphens (-), and underscores (_). 
@@ -800,7 +800,7 @@ public struct Batch: AWSService { /// - parameters: Additional parameters passed to the job that replace parameter substitution placeholders that are set in the job definition. Parameters are specified as a key and value pair mapping. Parameters in a SubmitJob request override any corresponding parameter defaults from the job definition. /// - propagateTags: Specifies whether to propagate the tags from the job or job definition to the corresponding Amazon ECS task. If no value is specified, the tags aren't propagated. Tags can only be propagated to the tasks during task creation. For tags with the same name, job tags are given priority over job definitions tags. If the total number of combined tags from the job and job definition is over 50, the job is moved to the FAILED state. When specified, this overrides the tag propagation setting in the job definition. /// - retryStrategy: The retry strategy to use for failed jobs from this SubmitJob operation. When a retry strategy is specified here, it overrides the retry strategy defined in the job definition. - /// - schedulingPriorityOverride: The scheduling priority for the job. This only affects jobs in job queues with a fair share policy. Jobs with a higher scheduling priority are scheduled before jobs with a lower scheduling priority. This overrides any scheduling priority in the job definition and works only within a single share identifier. The minimum supported value is 0 and the maximum supported value is 9999. + /// - schedulingPriorityOverride: The scheduling priority for the job. This only affects jobs in job queues with a fair share policy. Jobs with a higher scheduling priority are scheduled before jobs with a lower scheduling priority. This overrides any scheduling priority in the job definition and works only within a single share identifier. The minimum supported value is 0 and the maximum supported value is 9999. /// - shareIdentifier: The share identifier for the job. 
Don't specify this parameter if the job queue doesn't have a scheduling policy. If the job queue has a scheduling policy, then this parameter must be specified. This string is limited to 255 alphanumeric characters, and can be followed by an asterisk (*). /// - tags: The tags that you apply to the job request to help you categorize and organize your resources. Each tag consists of a key and an optional value. For more information, see Tagging Amazon Web Services Resources in Amazon Web Services General Reference. /// - timeout: The timeout configuration for this SubmitJob operation. You can specify a timeout duration after which Batch terminates your jobs if they haven't finished. If a job is terminated due to a timeout, it isn't retried. The minimum value for the timeout is 60 seconds. This configuration overrides any timeout configuration specified in the job definition. For array jobs, child jobs have the same timeout configuration as the parent job. For more information, see Job Timeouts in the Amazon Elastic Container Service Developer Guide. diff --git a/Sources/Soto/Services/Batch/Batch_shapes.swift b/Sources/Soto/Services/Batch/Batch_shapes.swift index 6c33872c27..2e3d42d7ba 100644 --- a/Sources/Soto/Services/Batch/Batch_shapes.swift +++ b/Sources/Soto/Services/Batch/Batch_shapes.swift @@ -468,11 +468,11 @@ extension Batch { } public struct ComputeResource: AWSEncodableShape & AWSDecodableShape { - /// The allocation strategy to use for the compute resource if not enough instances of the best fitting instance type can be allocated. This might be because of availability of the instance type in the Region or Amazon EC2 service limits. For more information, see Allocation strategies in the Batch User Guide. This parameter isn't applicable to jobs that are running on Fargate resources. Don't specify it. BEST_FIT (default) Batch selects an instance type that best fits the needs of the jobs with a preference for the lowest-cost instance type. 
If additional instances of the selected instance type aren't available, Batch waits for the additional instances to be available. If there aren't enough instances available or the user is reaching Amazon EC2 service limits, additional jobs aren't run until the currently running jobs are completed. This allocation strategy keeps costs lower but can limit scaling. If you're using Spot Fleets with BEST_FIT, the Spot Fleet IAM Role must be specified. Compute resources that use a BEST_FIT allocation strategy don't support infrastructure updates and can't update some parameters. For more information, see Updating compute environments in the Batch User Guide. BEST_FIT_PROGRESSIVE Batch selects additional instance types that are large enough to meet the requirements of the jobs in the queue. Its preference is for instance types with lower cost vCPUs. If additional instances of the previously selected instance types aren't available, Batch selects new instance types. SPOT_CAPACITY_OPTIMIZED Batch selects one or more instance types that are large enough to meet the requirements of the jobs in the queue. Its preference is for instance types that are less likely to be interrupted. This allocation strategy is only available for Spot Instance compute resources. SPOT_PRICE_CAPACITY_OPTIMIZED The price and capacity optimized allocation strategy looks at both price and capacity to select the Spot Instance pools that are the least likely to be interrupted and have the lowest possible price. This allocation strategy is only available for Spot Instance compute resources. With BEST_FIT_PROGRESSIVE,SPOT_CAPACITY_OPTIMIZED and SPOT_PRICE_CAPACITY_OPTIMIZED (recommended) strategies using On-Demand or Spot Instances, and the BEST_FIT strategy using Spot Instances, Batch might need to exceed maxvCpus to meet your capacity requirements. In this event, Batch never exceeds maxvCpus by more than a single instance. 
+ /// The allocation strategy to use for the compute resource if not enough instances of the best fitting instance type can be allocated. This might be because of availability of the instance type in the Region or Amazon EC2 service limits. For more information, see Allocation strategies in the Batch User Guide. This parameter isn't applicable to jobs that are running on Fargate resources. Don't specify it. BEST_FIT (default) Batch selects an instance type that best fits the needs of the jobs with a preference for the lowest-cost instance type. If additional instances of the selected instance type aren't available, Batch waits for the additional instances to be available. If there aren't enough instances available or the user is reaching Amazon EC2 service limits, additional jobs aren't run until the currently running jobs are completed. This allocation strategy keeps costs lower but can limit scaling. If you're using Spot Fleets with BEST_FIT, the Spot Fleet IAM Role must be specified. Compute resources that use a BEST_FIT allocation strategy don't support infrastructure updates and can't update some parameters. For more information, see Updating compute environments in the Batch User Guide. BEST_FIT_PROGRESSIVE Batch selects additional instance types that are large enough to meet the requirements of the jobs in the queue. Its preference is for instance types with lower cost vCPUs. If additional instances of the previously selected instance types aren't available, Batch selects new instance types. SPOT_CAPACITY_OPTIMIZED Batch selects one or more instance types that are large enough to meet the requirements of the jobs in the queue. Its preference is for instance types that are less likely to be interrupted. This allocation strategy is only available for Spot Instance compute resources. 
SPOT_PRICE_CAPACITY_OPTIMIZED The price and capacity optimized allocation strategy looks at both price and capacity to select the Spot Instance pools that are the least likely to be interrupted and have the lowest possible price. This allocation strategy is only available for Spot Instance compute resources. With BEST_FIT_PROGRESSIVE, SPOT_CAPACITY_OPTIMIZED and SPOT_PRICE_CAPACITY_OPTIMIZED (recommended) strategies using On-Demand or Spot Instances, and the BEST_FIT strategy using Spot Instances, Batch might need to exceed maxvCpus to meet your capacity requirements. In this event, Batch never exceeds maxvCpus by more than a single instance. public let allocationStrategy: CRAllocationStrategy? /// The maximum percentage that a Spot Instance price can be when compared with the On-Demand price for that instance type before instances are launched. For example, if your maximum percentage is 20%, then the Spot price must be less than 20% of the current On-Demand price for that Amazon EC2 instance. You always pay the lowest (market) price and never more than your maximum percentage. If you leave this field empty, the default value is 100% of the On-Demand price. For most use cases, we recommend leaving this field empty. This parameter isn't applicable to jobs that are running on Fargate resources. Don't specify it. public let bidPercentage: Int? - /// The desired number of vCPUS in the compute environment. Batch modifies this value between the minimum and maximum values based on job queue demand. This parameter isn't applicable to jobs that are running on Fargate resources. Don't specify it. + /// The desired number of vCPUS in the compute environment. Batch modifies this value between the minimum and maximum values based on job queue demand. This parameter isn't applicable to jobs that are running on Fargate resources. Don't specify it. public let desiredvCpus: Int? 
/// Provides information that's used to select Amazon Machine Images (AMIs) for Amazon EC2 instances in the compute environment. If Ec2Configuration isn't specified, the default is ECS_AL2. One or two values can be provided. This parameter isn't applicable to jobs that are running on Fargate resources. Don't specify it. public let ec2Configuration: [Ec2Configuration]? @@ -486,9 +486,9 @@ extension Batch { public let instanceTypes: [String]? /// The launch template to use for your compute resources. Any other compute resource parameters that you specify in a CreateComputeEnvironment API operation override the same parameters in the launch template. You must specify either the launch template ID or launch template name in the request, but not both. For more information, see Launch template support in the Batch User Guide. This parameter isn't applicable to jobs that are running on Fargate resources. Don't specify it. public let launchTemplate: LaunchTemplateSpecification? - /// The maximum number of vCPUs that a compute environment can support. With BEST_FIT_PROGRESSIVE,SPOT_CAPACITY_OPTIMIZED and SPOT_PRICE_CAPACITY_OPTIMIZED (recommended) strategies using On-Demand or Spot Instances, and the BEST_FIT strategy using Spot Instances, Batch might need to exceed maxvCpus to meet your capacity requirements. In this event, Batch never exceeds maxvCpus by more than a single instance. + /// The maximum number of vCPUs that a compute environment can support. With BEST_FIT_PROGRESSIVE,SPOT_CAPACITY_OPTIMIZED and SPOT_PRICE_CAPACITY_OPTIMIZED (recommended) strategies using On-Demand or Spot Instances, and the BEST_FIT strategy using Spot Instances, Batch might need to exceed maxvCpus to meet your capacity requirements. In this event, Batch never exceeds maxvCpus by more than a single instance. public let maxvCpus: Int? - /// The minimum number of vCPUs that a compute environment should maintain (even if the compute environment is DISABLED). 
This parameter isn't applicable to jobs that are running on Fargate resources. Don't specify it. + /// The minimum number of vCPUs that a compute environment should maintain (even if the compute environment is DISABLED). This parameter isn't applicable to jobs that are running on Fargate resources. Don't specify it. public let minvCpus: Int? /// The Amazon EC2 placement group to associate with your compute resources. If you intend to submit multi-node parallel jobs to your compute environment, you should consider creating a cluster placement group and associate it with your compute resources. This keeps your multi-node parallel job on a logical grouping of instances within a single Availability Zone with high network flow potential. For more information, see Placement groups in the Amazon EC2 User Guide for Linux Instances. This parameter isn't applicable to jobs that are running on Fargate resources. Don't specify it. public let placementGroup: String? @@ -574,11 +574,11 @@ extension Batch { } public struct ComputeResourceUpdate: AWSEncodableShape { - /// The allocation strategy to use for the compute resource if there's not enough instances of the best fitting instance type that can be allocated. This might be because of availability of the instance type in the Region or Amazon EC2 service limits. For more information, see Allocation strategies in the Batch User Guide. When updating a compute environment, changing the allocation strategy requires an infrastructure update of the compute environment. For more information, see Updating compute environments in the Batch User Guide. BEST_FIT isn't supported when updating a compute environment. This parameter isn't applicable to jobs that are running on Fargate resources. Don't specify it. BEST_FIT_PROGRESSIVE Batch selects additional instance types that are large enough to meet the requirements of the jobs in the queue. Its preference is for instance types with lower cost vCPUs. 
If additional instances of the previously selected instance types aren't available, Batch selects new instance types. SPOT_CAPACITY_OPTIMIZED Batch selects one or more instance types that are large enough to meet the requirements of the jobs in the queue. Its preference is for instance types that are less likely to be interrupted. This allocation strategy is only available for Spot Instance compute resources. SPOT_PRICE_CAPACITY_OPTIMIZED The price and capacity optimized allocation strategy looks at both price and capacity to select the Spot Instance pools that are the least likely to be interrupted and have the lowest possible price. This allocation strategy is only available for Spot Instance compute resources. With BEST_FIT_PROGRESSIVE,SPOT_CAPACITY_OPTIMIZED and SPOT_PRICE_CAPACITY_OPTIMIZED (recommended) strategies using On-Demand or Spot Instances, and the BEST_FIT strategy using Spot Instances, Batch might need to exceed maxvCpus to meet your capacity requirements. In this event, Batch never exceeds maxvCpus by more than a single instance. + /// The allocation strategy to use for the compute resource if there's not enough instances of the best fitting instance type that can be allocated. This might be because of availability of the instance type in the Region or Amazon EC2 service limits. For more information, see Allocation strategies in the Batch User Guide. When updating a compute environment, changing the allocation strategy requires an infrastructure update of the compute environment. For more information, see Updating compute environments in the Batch User Guide. BEST_FIT isn't supported when updating a compute environment. This parameter isn't applicable to jobs that are running on Fargate resources. Don't specify it. BEST_FIT_PROGRESSIVE Batch selects additional instance types that are large enough to meet the requirements of the jobs in the queue. Its preference is for instance types with lower cost vCPUs. 
If additional instances of the previously selected instance types aren't available, Batch selects new instance types. SPOT_CAPACITY_OPTIMIZED Batch selects one or more instance types that are large enough to meet the requirements of the jobs in the queue. Its preference is for instance types that are less likely to be interrupted. This allocation strategy is only available for Spot Instance compute resources. SPOT_PRICE_CAPACITY_OPTIMIZED The price and capacity optimized allocation strategy looks at both price and capacity to select the Spot Instance pools that are the least likely to be interrupted and have the lowest possible price. This allocation strategy is only available for Spot Instance compute resources. With BEST_FIT_PROGRESSIVE,SPOT_CAPACITY_OPTIMIZED and SPOT_PRICE_CAPACITY_OPTIMIZED (recommended) strategies using On-Demand or Spot Instances, and the BEST_FIT strategy using Spot Instances, Batch might need to exceed maxvCpus to meet your capacity requirements. In this event, Batch never exceeds maxvCpus by more than a single instance. public let allocationStrategy: CRUpdateAllocationStrategy? /// The maximum percentage that a Spot Instance price can be when compared with the On-Demand price for that instance type before instances are launched. For example, if your maximum percentage is 20%, the Spot price must be less than 20% of the current On-Demand price for that Amazon EC2 instance. You always pay the lowest (market) price and never more than your maximum percentage. For most use cases, we recommend leaving this field empty. When updating a compute environment, changing the bid percentage requires an infrastructure update of the compute environment. For more information, see Updating compute environments in the Batch User Guide. This parameter isn't applicable to jobs that are running on Fargate resources. Don't specify it. public let bidPercentage: Int? - /// The desired number of vCPUS in the compute environment. 
Batch modifies this value between the minimum and maximum values based on job queue demand. This parameter isn't applicable to jobs that are running on Fargate resources. Don't specify it. Batch doesn't support changing the desired number of vCPUs of an existing compute environment. Don't specify this parameter for compute environments using Amazon EKS clusters. When you update the desiredvCpus setting, the value must be between the minvCpus and maxvCpus values. Additionally, the updated desiredvCpus value must be greater than or equal to the current desiredvCpus value. For more information, see Troubleshooting Batch in the Batch User Guide. + /// The desired number of vCPUS in the compute environment. Batch modifies this value between the minimum and maximum values based on job queue demand. This parameter isn't applicable to jobs that are running on Fargate resources. Don't specify it. Batch doesn't support changing the desired number of vCPUs of an existing compute environment. Don't specify this parameter for compute environments using Amazon EKS clusters. When you update the desiredvCpus setting, the value must be between the minvCpus and maxvCpus values. Additionally, the updated desiredvCpus value must be greater than or equal to the current desiredvCpus value. For more information, see Troubleshooting Batch in the Batch User Guide. public let desiredvCpus: Int? /// Provides information used to select Amazon Machine Images (AMIs) for Amazon EC2 instances in the compute environment. If Ec2Configuration isn't specified, the default is ECS_AL2. When updating a compute environment, changing this setting requires an infrastructure update of the compute environment. For more information, see Updating compute environments in the Batch User Guide. To remove the Amazon EC2 configuration and any custom AMI ID specified in imageIdOverride, set this value to an empty string. One or two values can be provided. 
This parameter isn't applicable to jobs that are running on Fargate resources. Don't specify it. public let ec2Configuration: [Ec2Configuration]? @@ -592,9 +592,9 @@ extension Batch { public let instanceTypes: [String]? /// The updated launch template to use for your compute resources. You must specify either the launch template ID or launch template name in the request, but not both. For more information, see Launch template support in the Batch User Guide. To remove the custom launch template and use the default launch template, set launchTemplateId or launchTemplateName member of the launch template specification to an empty string. Removing the launch template from a compute environment will not remove the AMI specified in the launch template. In order to update the AMI specified in a launch template, the updateToLatestImageVersion parameter must be set to true. When updating a compute environment, changing the launch template requires an infrastructure update of the compute environment. For more information, see Updating compute environments in the Batch User Guide. This parameter isn't applicable to jobs that are running on Fargate resources. Don't specify it. public let launchTemplate: LaunchTemplateSpecification? - /// The maximum number of Amazon EC2 vCPUs that an environment can reach. With BEST_FIT_PROGRESSIVE,SPOT_CAPACITY_OPTIMIZED and SPOT_PRICE_CAPACITY_OPTIMIZED (recommended) strategies using On-Demand or Spot Instances, and the BEST_FIT strategy using Spot Instances, Batch might need to exceed maxvCpus to meet your capacity requirements. In this event, Batch never exceeds maxvCpus by more than a single instance. + /// The maximum number of Amazon EC2 vCPUs that an environment can reach. With BEST_FIT_PROGRESSIVE,SPOT_CAPACITY_OPTIMIZED and SPOT_PRICE_CAPACITY_OPTIMIZED (recommended) strategies using On-Demand or Spot Instances, and the BEST_FIT strategy using Spot Instances, Batch might need to exceed maxvCpus to meet your capacity requirements. 
In this event, Batch never exceeds maxvCpus by more than a single instance. public let maxvCpus: Int? - /// The minimum number of vCPUs that an environment should maintain (even if the compute environment is DISABLED). This parameter isn't applicable to jobs that are running on Fargate resources. Don't specify it. + /// The minimum number of vCPUs that an environment should maintain (even if the compute environment is DISABLED). This parameter isn't applicable to jobs that are running on Fargate resources. Don't specify it. public let minvCpus: Int? /// The Amazon EC2 placement group to associate with your compute resources. If you intend to submit multi-node parallel jobs to your compute environment, you should consider creating a cluster placement group and associate it with your compute resources. This keeps your multi-node parallel job on a logical grouping of instances within a single Availability Zone with high network flow potential. For more information, see Placement groups in the Amazon EC2 User Guide for Linux Instances. When updating a compute environment, changing the placement group requires an infrastructure update of the compute environment. For more information, see Updating compute environments in the Batch User Guide. This parameter isn't applicable to jobs that are running on Fargate resources. Don't specify it. public let placementGroup: String? @@ -1994,18 +1994,22 @@ extension Batch { public let name: String? /// If this value is true, the container has read-only access to the volume. Otherwise, the container can write to the volume. The default value is false. public let readOnly: Bool? + /// A sub-path inside the referenced volume instead of its root. + public let subPath: String? @inlinable - public init(mountPath: String? = nil, name: String? = nil, readOnly: Bool? = nil) { + public init(mountPath: String? = nil, name: String? = nil, readOnly: Bool? = nil, subPath: String? 
= nil) { self.mountPath = mountPath self.name = name self.readOnly = readOnly + self.subPath = subPath } private enum CodingKeys: String, CodingKey { case mountPath = "mountPath" case name = "name" case readOnly = "readOnly" + case subPath = "subPath" } } @@ -2047,16 +2051,42 @@ extension Batch { } public struct EksMetadata: AWSEncodableShape & AWSDecodableShape { + /// Key-value pairs used to attach arbitrary, non-identifying metadata to Kubernetes objects. Valid annotation keys have two segments: an optional prefix and a name, separated by a slash (/). The prefix is optional and must be 253 characters or less. If specified, the prefix must be a DNS subdomain — a series of DNS labels separated by dots (.), and it must end with a slash (/). The name segment is required and must be 63 characters or less. It can include alphanumeric characters ([a-z0-9A-Z]), dashes (-), underscores (_), and dots (.), but must begin and end with an alphanumeric character. Annotation values must be 255 characters or less. Annotations can be added or modified at any time. Each resource can have multiple annotations. + public let annotations: [String: String]? /// Key-value pairs used to identify, sort, and organize Kubernetes resources. Can contain up to 63 uppercase letters, lowercase letters, numbers, hyphens (-), and underscores (_). Labels can be added or modified at any time. Each resource can have multiple labels, but each key must be unique for a given object. public let labels: [String: String]? + /// The namespace of the Amazon EKS cluster. In Kubernetes, namespaces provide a mechanism for isolating groups of resources within a single cluster. Names of resources need to be unique within a namespace, but not across namespaces. Batch places Batch Job pods in this namespace. If this field is provided, the value can't be empty or null. 
It must meet the following requirements: 1-63 characters long Can't be set to default Can't start with kube Must match the following regular expression: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?$ For more information, see Namespaces in the Kubernetes documentation. This namespace can be different from the kubernetesNamespace set in the compute environment's EksConfiguration, but must have identical role-based access control (RBAC) roles as the compute environment's kubernetesNamespace. For multi-node parallel jobs, the same value must be provided across all the node ranges. + public let namespace: String? @inlinable - public init(labels: [String: String]? = nil) { + public init(annotations: [String: String]? = nil, labels: [String: String]? = nil, namespace: String? = nil) { + self.annotations = annotations self.labels = labels + self.namespace = namespace } private enum CodingKeys: String, CodingKey { + case annotations = "annotations" case labels = "labels" + case namespace = "namespace" + } + } + + public struct EksPersistentVolumeClaim: AWSEncodableShape & AWSDecodableShape { + /// The name of the persistentVolumeClaim bounded to a persistentVolume. For more information, see Persistent Volume Claims in the Kubernetes documentation. + public let claimName: String? + /// An optional boolean value indicating if the mount is read only. Default is false. For more information, see Read Only Mounts in the Kubernetes documentation. + public let readOnly: Bool? + + @inlinable + public init(claimName: String? = nil, readOnly: Bool? = nil) { + self.claimName = claimName + self.readOnly = readOnly + } + + private enum CodingKeys: String, CodingKey { + case claimName = "claimName" + case readOnly = "readOnly" } } @@ -2278,14 +2308,17 @@ extension Batch { public let hostPath: EksHostPath? /// The name of the volume. The name must be allowed as a DNS subdomain name. For more information, see DNS subdomain names in the Kubernetes documentation. public let name: String? 
+ /// Specifies the configuration of a Kubernetes persistentVolumeClaim bounded to a persistentVolume. For more information, see Persistent Volume Claims in the Kubernetes documentation. + public let persistentVolumeClaim: EksPersistentVolumeClaim? /// Specifies the configuration of a Kubernetes secret volume. For more information, see secret in the Kubernetes documentation. public let secret: EksSecret? @inlinable - public init(emptyDir: EksEmptyDir? = nil, hostPath: EksHostPath? = nil, name: String? = nil, secret: EksSecret? = nil) { + public init(emptyDir: EksEmptyDir? = nil, hostPath: EksHostPath? = nil, name: String? = nil, persistentVolumeClaim: EksPersistentVolumeClaim? = nil, secret: EksSecret? = nil) { self.emptyDir = emptyDir self.hostPath = hostPath self.name = name + self.persistentVolumeClaim = persistentVolumeClaim self.secret = secret } @@ -2297,6 +2330,7 @@ extension Batch { case emptyDir = "emptyDir" case hostPath = "hostPath" case name = "name" + case persistentVolumeClaim = "persistentVolumeClaim" case secret = "secret" } } @@ -2322,7 +2356,7 @@ extension Batch { public let onExitCode: String? /// Contains a glob pattern to match against the Reason returned for a job. The pattern can contain up to 512 characters. It can contain letters, numbers, periods (.), colons (:), and white space (including spaces and tabs). It can optionally end with an asterisk (*) so that only the start of the string needs to be an exact match. public let onReason: String? - /// Contains a glob pattern to match against the StatusReason returned for a job. The pattern can contain up to 512 characters. It can contain letters, numbers, periods (.), colons (:), and white spaces (including spaces or tabs). It can optionally end with an asterisk (*) so that only the start of the string needs to be an exact match. + /// Contains a glob pattern to match against the StatusReason returned for a job. The pattern can contain up to 512 characters. 
It can contain letters, numbers, periods (.), colons (:), and white spaces (including spaces or tabs). It can optionally end with an asterisk (*) so that only the start of the string needs to be an exact match. public let onStatusReason: String? @inlinable @@ -3476,7 +3510,7 @@ extension Batch { public struct RuntimePlatform: AWSEncodableShape & AWSDecodableShape { /// The vCPU architecture. The default value is X86_64. Valid values are X86_64 and ARM64. This parameter must be set to X86_64 for Windows containers. Fargate Spot is not supported for ARM64 and Windows-based containers on Fargate. A job queue will be blocked if a Fargate ARM64 or Windows job is submitted to a job queue with only Fargate Spot compute environments. However, you can attach both FARGATE and FARGATE_SPOT compute environments to the same job queue. public let cpuArchitecture: String? - /// The operating system for the compute environment. Valid values are: LINUX (default), WINDOWS_SERVER_2019_CORE, WINDOWS_SERVER_2019_FULL, WINDOWS_SERVER_2022_CORE, and WINDOWS_SERVER_2022_FULL. The following parameters can’t be set for Windows containers: linuxParameters, privileged, user, ulimits, readonlyRootFilesystem, and efsVolumeConfiguration. The Batch Scheduler checks the compute environments that are attached to the job queue before registering a task definition with Fargate. In this scenario, the job queue is where the job is submitted. If the job requires a Windows container and the first compute environment is LINUX, the compute environment is skipped and the next compute environment is checked until a Windows-based compute environment is found. Fargate Spot is not supported for ARM64 and Windows-based containers on Fargate. A job queue will be blocked if a Fargate ARM64 or Windows job is submitted to a job queue with only Fargate Spot compute environments. However, you can attach both FARGATE and FARGATE_SPOT compute environments to the same job queue. 
+ /// The operating system for the compute environment. Valid values are: LINUX (default), WINDOWS_SERVER_2019_CORE, WINDOWS_SERVER_2019_FULL, WINDOWS_SERVER_2022_CORE, and WINDOWS_SERVER_2022_FULL. The following parameters can’t be set for Windows containers: linuxParameters, privileged, user, ulimits, readonlyRootFilesystem, and efsVolumeConfiguration. The Batch Scheduler checks the compute environments that are attached to the job queue before registering a task definition with Fargate. In this scenario, the job queue is where the job is submitted. If the job requires a Windows container and the first compute environment is LINUX, the compute environment is skipped and the next compute environment is checked until a Windows-based compute environment is found. Fargate Spot is not supported for ARM64 and Windows-based containers on Fargate. A job queue will be blocked if a Fargate ARM64 or Windows job is submitted to a job queue with only Fargate Spot compute environments. However, you can attach both FARGATE and FARGATE_SPOT compute environments to the same job queue. public let operatingSystemFamily: String? @inlinable @@ -3592,7 +3626,7 @@ extension Batch { public let propagateTags: Bool? /// The retry strategy to use for failed jobs from this SubmitJob operation. When a retry strategy is specified here, it overrides the retry strategy defined in the job definition. public let retryStrategy: RetryStrategy? - /// The scheduling priority for the job. This only affects jobs in job queues with a fair share policy. Jobs with a higher scheduling priority are scheduled before jobs with a lower scheduling priority. This overrides any scheduling priority in the job definition and works only within a single share identifier. The minimum supported value is 0 and the maximum supported value is 9999. + /// The scheduling priority for the job. This only affects jobs in job queues with a fair share policy. 
Jobs with a higher scheduling priority are scheduled before jobs with a lower scheduling priority. This overrides any scheduling priority in the job definition and works only within a single share identifier. The minimum supported value is 0 and the maximum supported value is 9999. public let schedulingPriorityOverride: Int? /// The share identifier for the job. Don't specify this parameter if the job queue doesn't have a scheduling policy. If the job queue has a scheduling policy, then this parameter must be specified. This string is limited to 255 alphanumeric characters, and can be followed by an asterisk (*). public let shareIdentifier: String? diff --git a/Sources/Soto/Services/Bedrock/Bedrock_api.swift b/Sources/Soto/Services/Bedrock/Bedrock_api.swift index 4ea3b43bd1..8ca9ea9aa5 100644 --- a/Sources/Soto/Services/Bedrock/Bedrock_api.swift +++ b/Sources/Soto/Services/Bedrock/Bedrock_api.swift @@ -116,12 +116,14 @@ public struct Bedrock: AWSService { "bedrock-runtime-us-east-2": "bedrock-runtime.us-east-2.amazonaws.com", "bedrock-runtime-us-gov-east-1": "bedrock-runtime.us-gov-east-1.amazonaws.com", "bedrock-runtime-us-gov-west-1": "bedrock-runtime.us-gov-west-1.amazonaws.com", + "bedrock-runtime-us-iso-east-1": "bedrock-runtime.us-iso-east-1.c2s.ic.gov", "bedrock-runtime-us-west-2": "bedrock-runtime.us-west-2.amazonaws.com", "bedrock-sa-east-1": "bedrock.sa-east-1.amazonaws.com", "bedrock-us-east-1": "bedrock.us-east-1.amazonaws.com", "bedrock-us-east-2": "bedrock.us-east-2.amazonaws.com", "bedrock-us-gov-east-1": "bedrock.us-gov-east-1.amazonaws.com", "bedrock-us-gov-west-1": "bedrock.us-gov-west-1.amazonaws.com", + "bedrock-us-iso-east-1": "bedrock.us-iso-east-1.c2s.ic.gov", "bedrock-us-west-2": "bedrock.us-west-2.amazonaws.com" ]} diff --git a/Sources/Soto/Services/BedrockAgentRuntime/BedrockAgentRuntime_shapes.swift b/Sources/Soto/Services/BedrockAgentRuntime/BedrockAgentRuntime_shapes.swift index ec2111f7b7..89753a0e36 100644 --- 
a/Sources/Soto/Services/BedrockAgentRuntime/BedrockAgentRuntime_shapes.swift +++ b/Sources/Soto/Services/BedrockAgentRuntime/BedrockAgentRuntime_shapes.swift @@ -2283,10 +2283,10 @@ extension BedrockAgentRuntime { /// The name of the flow output node that the output is from. public let nodeName: String /// The type of the node that the output is from. - public let nodeType: NodeType + public let nodeType: NodeType? @inlinable - public init(content: FlowOutputContent, nodeName: String, nodeType: NodeType) { + public init(content: FlowOutputContent, nodeName: String, nodeType: NodeType? = nil) { self.content = content self.nodeName = nodeName self.nodeType = nodeType diff --git a/Sources/Soto/Services/Budgets/Budgets_api.swift b/Sources/Soto/Services/Budgets/Budgets_api.swift index b2ed71ef7b..d8f20643e3 100644 --- a/Sources/Soto/Services/Budgets/Budgets_api.swift +++ b/Sources/Soto/Services/Budgets/Budgets_api.swift @@ -79,13 +79,15 @@ public struct Budgets: AWSService { static var serviceEndpoints: [String: String] {[ "aws-cn-global": "budgets.amazonaws.com.cn", "aws-global": "budgets.amazonaws.com", - "us-isob-east-1": "budgets.us-isob-east-1.sc2s.sgov.gov" + "aws-iso-b-global": "budgets.global.sc2s.sgov.gov", + "us-isob-east-1": "budgets.global.sc2s.sgov.gov" ]} /// Default endpoint and region to use for each partition static var partitionEndpoints: [AWSPartition: (endpoint: String, region: SotoCore.Region)] {[ .aws: (endpoint: "aws-global", region: .useast1), - .awscn: (endpoint: "aws-cn-global", region: .cnnorthwest1) + .awscn: (endpoint: "aws-cn-global", region: .cnnorthwest1), + .awsisob: (endpoint: "aws-iso-b-global", region: .usisobeast1) ]} diff --git a/Sources/Soto/Services/CleanRoomsML/CleanRoomsML_api.swift b/Sources/Soto/Services/CleanRoomsML/CleanRoomsML_api.swift index 1a5fb7e62f..d6b437b0ff 100644 --- a/Sources/Soto/Services/CleanRoomsML/CleanRoomsML_api.swift +++ b/Sources/Soto/Services/CleanRoomsML/CleanRoomsML_api.swift @@ -1978,7 +1978,7 @@ 
public struct CleanRoomsML: AWSService { /// Parameters: /// - configuredModelAlgorithmAssociationArn: The Amazon Resource Name (ARN) of the configured model algorithm association that is used for this trained model inference job. /// - containerExecutionParameters: The execution parameters for the container. - /// - dataSource: Defines he data source that is used for the trained model inference job. + /// - dataSource: Defines the data source that is used for the trained model inference job. /// - description: The description of the trained model inference job. /// - environment: The environment variables to set in the Docker container. /// - kmsKeyArn: The Amazon Resource Name (ARN) of the KMS key. This key is used to encrypt and decrypt customer-owned data in the ML inference job and associated data. diff --git a/Sources/Soto/Services/CleanRoomsML/CleanRoomsML_shapes.swift b/Sources/Soto/Services/CleanRoomsML/CleanRoomsML_shapes.swift index 734aa75580..afdcd08a8a 100644 --- a/Sources/Soto/Services/CleanRoomsML/CleanRoomsML_shapes.swift +++ b/Sources/Soto/Services/CleanRoomsML/CleanRoomsML_shapes.swift @@ -453,13 +453,15 @@ extension CleanRoomsML { public let dataSource: S3ConfigMap? /// The ARN of the IAM role that can read the Amazon S3 bucket where the seed audience is stored. public let roleArn: String + public let sqlComputeConfiguration: ComputeConfiguration? /// The protected SQL query parameters. public let sqlParameters: ProtectedQuerySQLParameters? @inlinable - public init(dataSource: S3ConfigMap? = nil, roleArn: String, sqlParameters: ProtectedQuerySQLParameters? = nil) { + public init(dataSource: S3ConfigMap? = nil, roleArn: String, sqlComputeConfiguration: ComputeConfiguration? = nil, sqlParameters: ProtectedQuerySQLParameters? 
= nil) { self.dataSource = dataSource self.roleArn = roleArn + self.sqlComputeConfiguration = sqlComputeConfiguration self.sqlParameters = sqlParameters } @@ -474,6 +476,7 @@ extension CleanRoomsML { private enum CodingKeys: String, CodingKey { case dataSource = "dataSource" case roleArn = "roleArn" + case sqlComputeConfiguration = "sqlComputeConfiguration" case sqlParameters = "sqlParameters" } } @@ -4947,7 +4950,7 @@ extension CleanRoomsML { public let configuredModelAlgorithmAssociationArn: String? /// The execution parameters for the container. public let containerExecutionParameters: InferenceContainerExecutionParameters? - /// Defines he data source that is used for the trained model inference job. + /// Defines the data source that is used for the trained model inference job. public let dataSource: ModelInferenceDataSource /// The description of the trained model inference job. public let description: String? diff --git a/Sources/Soto/Services/Cloud9/Cloud9_api.swift b/Sources/Soto/Services/Cloud9/Cloud9_api.swift index 47b9d8522a..b7feb76f68 100644 --- a/Sources/Soto/Services/Cloud9/Cloud9_api.swift +++ b/Sources/Soto/Services/Cloud9/Cloud9_api.swift @@ -25,7 +25,7 @@ import Foundation /// Service object for interacting with AWS Cloud9 service. /// -/// Cloud9 Cloud9 is a collection of tools that you can use to code, build, run, test, debug, and release software in the cloud. For more information about Cloud9, see the Cloud9 User Guide. Cloud9 supports these operations: CreateEnvironmentEC2: Creates an Cloud9 development environment, launches an Amazon EC2 instance, and then connects from the instance to the environment. CreateEnvironmentMembership: Adds an environment member to an environment. DeleteEnvironment: Deletes an environment. If an Amazon EC2 instance is connected to the environment, also terminates the instance. DeleteEnvironmentMembership: Deletes an environment member from an environment. 
DescribeEnvironmentMemberships: Gets information about environment members for an environment. DescribeEnvironments: Gets information about environments. DescribeEnvironmentStatus: Gets status information for an environment. ListEnvironments: Gets a list of environment identifiers. ListTagsForResource: Gets the tags for an environment. TagResource: Adds tags to an environment. UntagResource: Removes tags from an environment. UpdateEnvironment: Changes the settings of an existing environment. UpdateEnvironmentMembership: Changes the settings of an existing environment member for an environment. +/// Cloud9 Cloud9 is a collection of tools that you can use to code, build, run, test, debug, and release software in the cloud. For more information about Cloud9, see the Cloud9 User Guide. Cloud9 is no longer available to new customers. Existing customers of Cloud9 can continue to use the service as normal. Learn more" Cloud9 supports these operations: CreateEnvironmentEC2: Creates an Cloud9 development environment, launches an Amazon EC2 instance, and then connects from the instance to the environment. CreateEnvironmentMembership: Adds an environment member to an environment. DeleteEnvironment: Deletes an environment. If an Amazon EC2 instance is connected to the environment, also terminates the instance. DeleteEnvironmentMembership: Deletes an environment member from an environment. DescribeEnvironmentMemberships: Gets information about environment members for an environment. DescribeEnvironments: Gets information about environments. DescribeEnvironmentStatus: Gets status information for an environment. ListEnvironments: Gets a list of environment identifiers. ListTagsForResource: Gets the tags for an environment. TagResource: Adds tags to an environment. UntagResource: Removes tags from an environment. UpdateEnvironment: Changes the settings of an existing environment. 
UpdateEnvironmentMembership: Changes the settings of an existing environment member for an environment. public struct Cloud9: AWSService { // MARK: Member variables @@ -122,7 +122,7 @@ public struct Cloud9: AWSService { // MARK: API Calls - /// Creates an Cloud9 development environment, launches an Amazon Elastic Compute Cloud (Amazon EC2) instance, and then connects from the instance to the environment. + /// Creates an Cloud9 development environment, launches an Amazon Elastic Compute Cloud (Amazon EC2) instance, and then connects from the instance to the environment. Cloud9 is no longer available to new customers. Existing customers of Cloud9 can continue to use the service as normal. Learn more" @Sendable @inlinable public func createEnvironmentEC2(_ input: CreateEnvironmentEC2Request, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateEnvironmentEC2Result { @@ -135,7 +135,7 @@ public struct Cloud9: AWSService { logger: logger ) } - /// Creates an Cloud9 development environment, launches an Amazon Elastic Compute Cloud (Amazon EC2) instance, and then connects from the instance to the environment. + /// Creates an Cloud9 development environment, launches an Amazon Elastic Compute Cloud (Amazon EC2) instance, and then connects from the instance to the environment. Cloud9 is no longer available to new customers. Existing customers of Cloud9 can continue to use the service as normal. Learn more" /// /// Parameters: /// - automaticStopTimeMinutes: The number of minutes until the running instance is shut down after the environment has last been used. @@ -143,7 +143,7 @@ public struct Cloud9: AWSService { /// - connectionType: The connection type used for connecting to an Amazon EC2 environment. Valid values are CONNECT_SSH (default) and CONNECT_SSM (connected through Amazon EC2 Systems Manager). For more information, see Accessing no-ingress EC2 instances with Amazon EC2 Systems Manager in the Cloud9 User Guide. 
/// - description: The description of the environment to create. /// - dryRun: Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. - /// - imageId: The identifier for the Amazon Machine Image (AMI) that's used to create the EC2 instance. To choose an AMI for the instance, you must specify a valid AMI alias or a valid Amazon EC2 Systems Manager (SSM) path. From December 04, 2023, you will be required to include the imageId parameter for the CreateEnvironmentEC2 action. This change will be reflected across all direct methods of communicating with the API, such as Amazon Web Services SDK, Amazon Web Services CLI and Amazon Web Services CloudFormation. This change will only affect direct API consumers, and not Cloud9 console users. We recommend using Amazon Linux 2023 as the AMI to create your environment as it is fully supported. Since Ubuntu 18.04 has ended standard support as of May 31, 2023, we recommend you choose Ubuntu 22.04. AMI aliases Amazon Linux 2: amazonlinux-2-x86_64 Amazon Linux 2023 (recommended): amazonlinux-2023-x86_64 Ubuntu 18.04: ubuntu-18.04-x86_64 Ubuntu 22.04: ubuntu-22.04-x86_64 SSM paths Amazon Linux 2: resolve:ssm:/aws/service/cloud9/amis/amazonlinux-2-x86_64 Amazon Linux 2023 (recommended): resolve:ssm:/aws/service/cloud9/amis/amazonlinux-2023-x86_64 Ubuntu 18.04: resolve:ssm:/aws/service/cloud9/amis/ubuntu-18.04-x86_64 Ubuntu 22.04: resolve:ssm:/aws/service/cloud9/amis/ubuntu-22.04-x86_64 + /// - imageId: The identifier for the Amazon Machine Image (AMI) that's used to create the EC2 instance. To choose an AMI for the instance, you must specify a valid AMI alias or a valid Amazon EC2 Systems Manager (SSM) path. We recommend using Amazon Linux 2023 as the AMI to create your environment as it is fully supported. 
From December 16, 2024, Ubuntu 18.04 will be removed from the list of available imageIds for Cloud9. This change is necessary as Ubuntu 18.04 has ended standard support on May 31, 2023. This change will only affect direct API consumers, and not Cloud9 console users. Since Ubuntu 18.04 has ended standard support as of May 31, 2023, we recommend you choose Ubuntu 22.04. AMI aliases Amazon Linux 2: amazonlinux-2-x86_64 Amazon Linux 2023 (recommended): amazonlinux-2023-x86_64 Ubuntu 18.04: ubuntu-18.04-x86_64 Ubuntu 22.04: ubuntu-22.04-x86_64 SSM paths Amazon Linux 2: resolve:ssm:/aws/service/cloud9/amis/amazonlinux-2-x86_64 Amazon Linux 2023 (recommended): resolve:ssm:/aws/service/cloud9/amis/amazonlinux-2023-x86_64 Ubuntu 18.04: resolve:ssm:/aws/service/cloud9/amis/ubuntu-18.04-x86_64 Ubuntu 22.04: resolve:ssm:/aws/service/cloud9/amis/ubuntu-22.04-x86_64 /// - instanceType: The type of instance to connect to the environment (for example, t2.micro). /// - name: The name of the environment to create. This name is visible to other IAM users in the same Amazon Web Services account. /// - ownerArn: The Amazon Resource Name (ARN) of the environment owner. This ARN can be the ARN of any IAM principal. If this value is not specified, the ARN defaults to this environment's creator. @@ -181,7 +181,7 @@ public struct Cloud9: AWSService { return try await self.createEnvironmentEC2(input, logger: logger) } - /// Adds an environment member to an Cloud9 development environment. + /// Adds an environment member to an Cloud9 development environment. Cloud9 is no longer available to new customers. Existing customers of Cloud9 can continue to use the service as normal. 
Learn more" @Sendable @inlinable public func createEnvironmentMembership(_ input: CreateEnvironmentMembershipRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateEnvironmentMembershipResult { @@ -194,7 +194,7 @@ public struct Cloud9: AWSService { logger: logger ) } - /// Adds an environment member to an Cloud9 development environment. + /// Adds an environment member to an Cloud9 development environment. Cloud9 is no longer available to new customers. Existing customers of Cloud9 can continue to use the service as normal. Learn more" /// /// Parameters: /// - environmentId: The ID of the environment that contains the environment member you want to add. @@ -216,7 +216,7 @@ public struct Cloud9: AWSService { return try await self.createEnvironmentMembership(input, logger: logger) } - /// Deletes an Cloud9 development environment. If an Amazon EC2 instance is connected to the environment, also terminates the instance. + /// Deletes an Cloud9 development environment. If an Amazon EC2 instance is connected to the environment, also terminates the instance. Cloud9 is no longer available to new customers. Existing customers of Cloud9 can continue to use the service as normal. Learn more" @Sendable @inlinable public func deleteEnvironment(_ input: DeleteEnvironmentRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DeleteEnvironmentResult { @@ -229,7 +229,7 @@ public struct Cloud9: AWSService { logger: logger ) } - /// Deletes an Cloud9 development environment. If an Amazon EC2 instance is connected to the environment, also terminates the instance. + /// Deletes an Cloud9 development environment. If an Amazon EC2 instance is connected to the environment, also terminates the instance. Cloud9 is no longer available to new customers. Existing customers of Cloud9 can continue to use the service as normal. Learn more" /// /// Parameters: /// - environmentId: The ID of the environment to delete. 
@@ -245,7 +245,7 @@ public struct Cloud9: AWSService { return try await self.deleteEnvironment(input, logger: logger) } - /// Deletes an environment member from a development environment. + /// Deletes an environment member from a development environment. Cloud9 is no longer available to new customers. Existing customers of Cloud9 can continue to use the service as normal. Learn more" @Sendable @inlinable public func deleteEnvironmentMembership(_ input: DeleteEnvironmentMembershipRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DeleteEnvironmentMembershipResult { @@ -258,7 +258,7 @@ public struct Cloud9: AWSService { logger: logger ) } - /// Deletes an environment member from a development environment. + /// Deletes an environment member from a development environment. Cloud9 is no longer available to new customers. Existing customers of Cloud9 can continue to use the service as normal. Learn more" /// /// Parameters: /// - environmentId: The ID of the environment to delete the environment member from. @@ -277,7 +277,7 @@ public struct Cloud9: AWSService { return try await self.deleteEnvironmentMembership(input, logger: logger) } - /// Gets information about environment members for an Cloud9 development environment. + /// Gets information about environment members for an Cloud9 development environment. Cloud9 is no longer available to new customers. Existing customers of Cloud9 can continue to use the service as normal. Learn more" @Sendable @inlinable public func describeEnvironmentMemberships(_ input: DescribeEnvironmentMembershipsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DescribeEnvironmentMembershipsResult { @@ -290,7 +290,7 @@ public struct Cloud9: AWSService { logger: logger ) } - /// Gets information about environment members for an Cloud9 development environment. + /// Gets information about environment members for an Cloud9 development environment. Cloud9 is no longer available to new customers. 
Existing customers of Cloud9 can continue to use the service as normal. Learn more" /// /// Parameters: /// - environmentId: The ID of the environment to get environment member information about. @@ -318,7 +318,7 @@ public struct Cloud9: AWSService { return try await self.describeEnvironmentMemberships(input, logger: logger) } - /// Gets status information for an Cloud9 development environment. + /// Gets status information for an Cloud9 development environment. Cloud9 is no longer available to new customers. Existing customers of Cloud9 can continue to use the service as normal. Learn more" @Sendable @inlinable public func describeEnvironmentStatus(_ input: DescribeEnvironmentStatusRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DescribeEnvironmentStatusResult { @@ -331,7 +331,7 @@ public struct Cloud9: AWSService { logger: logger ) } - /// Gets status information for an Cloud9 development environment. + /// Gets status information for an Cloud9 development environment. Cloud9 is no longer available to new customers. Existing customers of Cloud9 can continue to use the service as normal. Learn more" /// /// Parameters: /// - environmentId: The ID of the environment to get status information about. @@ -347,7 +347,7 @@ public struct Cloud9: AWSService { return try await self.describeEnvironmentStatus(input, logger: logger) } - /// Gets information about Cloud9 development environments. + /// Gets information about Cloud9 development environments. Cloud9 is no longer available to new customers. Existing customers of Cloud9 can continue to use the service as normal. Learn more" @Sendable @inlinable public func describeEnvironments(_ input: DescribeEnvironmentsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DescribeEnvironmentsResult { @@ -360,7 +360,7 @@ public struct Cloud9: AWSService { logger: logger ) } - /// Gets information about Cloud9 development environments. 
+ /// Gets information about Cloud9 development environments. Cloud9 is no longer available to new customers. Existing customers of Cloud9 can continue to use the service as normal. Learn more" /// /// Parameters: /// - environmentIds: The IDs of individual environments to get information about. @@ -376,7 +376,7 @@ public struct Cloud9: AWSService { return try await self.describeEnvironments(input, logger: logger) } - /// Gets a list of Cloud9 development environment identifiers. + /// Gets a list of Cloud9 development environment identifiers. Cloud9 is no longer available to new customers. Existing customers of Cloud9 can continue to use the service as normal. Learn more" @Sendable @inlinable public func listEnvironments(_ input: ListEnvironmentsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListEnvironmentsResult { @@ -389,7 +389,7 @@ public struct Cloud9: AWSService { logger: logger ) } - /// Gets a list of Cloud9 development environment identifiers. + /// Gets a list of Cloud9 development environment identifiers. Cloud9 is no longer available to new customers. Existing customers of Cloud9 can continue to use the service as normal. Learn more" /// /// Parameters: /// - maxResults: The maximum number of environments to get identifiers for. @@ -408,7 +408,7 @@ public struct Cloud9: AWSService { return try await self.listEnvironments(input, logger: logger) } - /// Gets a list of the tags associated with an Cloud9 development environment. + /// Gets a list of the tags associated with an Cloud9 development environment. Cloud9 is no longer available to new customers. Existing customers of Cloud9 can continue to use the service as normal.
Learn more" @Sendable @inlinable public func listTagsForResource(_ input: ListTagsForResourceRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListTagsForResourceResponse { @@ -421,7 +421,7 @@ public struct Cloud9: AWSService { logger: logger ) } - /// Gets a list of the tags associated with an Cloud9 development environment. + /// Gets a list of the tags associated with an Cloud9 development environment. Cloud9 is no longer available to new customers. Existing customers of Cloud9 can continue to use the service as normal. Learn more" /// /// Parameters: /// - resourceARN: The Amazon Resource Name (ARN) of the Cloud9 development environment to get the tags for. @@ -437,7 +437,7 @@ public struct Cloud9: AWSService { return try await self.listTagsForResource(input, logger: logger) } - /// Adds tags to an Cloud9 development environment. Tags that you add to an Cloud9 environment by using this method will NOT be automatically propagated to underlying resources. + /// Adds tags to an Cloud9 development environment. Cloud9 is no longer available to new customers. Existing customers of Cloud9 can continue to use the service as normal. Learn more" Tags that you add to an Cloud9 environment by using this method will NOT be automatically propagated to underlying resources. @Sendable @inlinable public func tagResource(_ input: TagResourceRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> TagResourceResponse { @@ -450,7 +450,7 @@ public struct Cloud9: AWSService { logger: logger ) } - /// Adds tags to an Cloud9 development environment. Tags that you add to an Cloud9 environment by using this method will NOT be automatically propagated to underlying resources. + /// Adds tags to an Cloud9 development environment. Cloud9 is no longer available to new customers. Existing customers of Cloud9 can continue to use the service as normal. 
Learn more" Tags that you add to an Cloud9 environment by using this method will NOT be automatically propagated to underlying resources. /// /// Parameters: /// - resourceARN: The Amazon Resource Name (ARN) of the Cloud9 development environment to add tags to. @@ -469,7 +469,7 @@ public struct Cloud9: AWSService { return try await self.tagResource(input, logger: logger) } - /// Removes tags from an Cloud9 development environment. + /// Removes tags from an Cloud9 development environment. Cloud9 is no longer available to new customers. Existing customers of Cloud9 can continue to use the service as normal. Learn more" @Sendable @inlinable public func untagResource(_ input: UntagResourceRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> UntagResourceResponse { @@ -482,7 +482,7 @@ public struct Cloud9: AWSService { logger: logger ) } - /// Removes tags from an Cloud9 development environment. + /// Removes tags from an Cloud9 development environment. Cloud9 is no longer available to new customers. Existing customers of Cloud9 can continue to use the service as normal. Learn more" /// /// Parameters: /// - resourceARN: The Amazon Resource Name (ARN) of the Cloud9 development environment to remove tags from. @@ -501,7 +501,7 @@ public struct Cloud9: AWSService { return try await self.untagResource(input, logger: logger) } - /// Changes the settings of an existing Cloud9 development environment. + /// Changes the settings of an existing Cloud9 development environment. Cloud9 is no longer available to new customers. Existing customers of Cloud9 can continue to use the service as normal. Learn more" @Sendable @inlinable public func updateEnvironment(_ input: UpdateEnvironmentRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> UpdateEnvironmentResult { @@ -514,7 +514,7 @@ public struct Cloud9: AWSService { logger: logger ) } - /// Changes the settings of an existing Cloud9 development environment. 
+ /// Changes the settings of an existing Cloud9 development environment. Cloud9 is no longer available to new customers. Existing customers of Cloud9 can continue to use the service as normal. Learn more" /// /// Parameters: /// - description: Any new or replacement description for the environment. @@ -539,7 +539,7 @@ public struct Cloud9: AWSService { return try await self.updateEnvironment(input, logger: logger) } - /// Changes the settings of an existing environment member for an Cloud9 development environment. + /// Changes the settings of an existing environment member for an Cloud9 development environment. Cloud9 is no longer available to new customers. Existing customers of Cloud9 can continue to use the service as normal. Learn more" @Sendable @inlinable public func updateEnvironmentMembership(_ input: UpdateEnvironmentMembershipRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> UpdateEnvironmentMembershipResult { @@ -552,7 +552,7 @@ public struct Cloud9: AWSService { logger: logger ) } - /// Changes the settings of an existing environment member for an Cloud9 development environment. + /// Changes the settings of an existing environment member for an Cloud9 development environment. Cloud9 is no longer available to new customers. Existing customers of Cloud9 can continue to use the service as normal. Learn more" /// /// Parameters: /// - environmentId: The ID of the environment for the environment member whose settings you want to change. diff --git a/Sources/Soto/Services/Cloud9/Cloud9_shapes.swift b/Sources/Soto/Services/Cloud9/Cloud9_shapes.swift index a5f8073779..a403b21cc9 100644 --- a/Sources/Soto/Services/Cloud9/Cloud9_shapes.swift +++ b/Sources/Soto/Services/Cloud9/Cloud9_shapes.swift @@ -105,7 +105,7 @@ extension Cloud9 { public let description: String? /// Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. 
If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. public let dryRun: Bool? - /// The identifier for the Amazon Machine Image (AMI) that's used to create the EC2 instance. To choose an AMI for the instance, you must specify a valid AMI alias or a valid Amazon EC2 Systems Manager (SSM) path. From December 04, 2023, you will be required to include the imageId parameter for the CreateEnvironmentEC2 action. This change will be reflected across all direct methods of communicating with the API, such as Amazon Web Services SDK, Amazon Web Services CLI and Amazon Web Services CloudFormation. This change will only affect direct API consumers, and not Cloud9 console users. We recommend using Amazon Linux 2023 as the AMI to create your environment as it is fully supported. Since Ubuntu 18.04 has ended standard support as of May 31, 2023, we recommend you choose Ubuntu 22.04. AMI aliases Amazon Linux 2: amazonlinux-2-x86_64 Amazon Linux 2023 (recommended): amazonlinux-2023-x86_64 Ubuntu 18.04: ubuntu-18.04-x86_64 Ubuntu 22.04: ubuntu-22.04-x86_64 SSM paths Amazon Linux 2: resolve:ssm:/aws/service/cloud9/amis/amazonlinux-2-x86_64 Amazon Linux 2023 (recommended): resolve:ssm:/aws/service/cloud9/amis/amazonlinux-2023-x86_64 Ubuntu 18.04: resolve:ssm:/aws/service/cloud9/amis/ubuntu-18.04-x86_64 Ubuntu 22.04: resolve:ssm:/aws/service/cloud9/amis/ubuntu-22.04-x86_64 + /// The identifier for the Amazon Machine Image (AMI) that's used to create the EC2 instance. To choose an AMI for the instance, you must specify a valid AMI alias or a valid Amazon EC2 Systems Manager (SSM) path. We recommend using Amazon Linux 2023 as the AMI to create your environment as it is fully supported. From December 16, 2024, Ubuntu 18.04 will be removed from the list of available imageIds for Cloud9. This change is necessary as Ubuntu 18.04 has ended standard support on May 31, 2023. 
This change will only affect direct API consumers, and not Cloud9 console users. Since Ubuntu 18.04 has ended standard support as of May 31, 2023, we recommend you choose Ubuntu 22.04. AMI aliases Amazon Linux 2: amazonlinux-2-x86_64 Amazon Linux 2023 (recommended): amazonlinux-2023-x86_64 Ubuntu 18.04: ubuntu-18.04-x86_64 Ubuntu 22.04: ubuntu-22.04-x86_64 SSM paths Amazon Linux 2: resolve:ssm:/aws/service/cloud9/amis/amazonlinux-2-x86_64 Amazon Linux 2023 (recommended): resolve:ssm:/aws/service/cloud9/amis/amazonlinux-2023-x86_64 Ubuntu 18.04: resolve:ssm:/aws/service/cloud9/amis/ubuntu-18.04-x86_64 Ubuntu 22.04: resolve:ssm:/aws/service/cloud9/amis/ubuntu-22.04-x86_64 public let imageId: String /// The type of instance to connect to the environment (for example, t2.micro). public let instanceType: String diff --git a/Sources/Soto/Services/CloudFront/CloudFront_shapes.swift b/Sources/Soto/Services/CloudFront/CloudFront_shapes.swift index 4cf6339013..750a050bd1 100644 --- a/Sources/Soto/Services/CloudFront/CloudFront_shapes.swift +++ b/Sources/Soto/Services/CloudFront/CloudFront_shapes.swift @@ -2634,7 +2634,7 @@ extension CloudFront { public let httpsPort: Int /// Specifies how long, in seconds, CloudFront persists its connection to the origin. The /// minimum timeout is 1 second, the maximum is 60 seconds, and the default (if you don't - /// specify otherwise) is 5 seconds. For more information, see Origin Keep-alive Timeout in the + /// specify otherwise) is 5 seconds. For more information, see Keep-alive timeout (custom origins only) in the /// Amazon CloudFront Developer Guide. public let originKeepaliveTimeout: Int? /// Specifies the protocol (HTTP or HTTPS) that CloudFront uses to connect to the origin. Valid @@ -2646,7 +2646,7 @@ extension CloudFront { /// Specifies how long, in seconds, CloudFront waits for a response from the origin. This is /// also known as the origin response timeout. 
The minimum timeout is 1 /// second, the maximum is 60 seconds, and the default (if you don't specify otherwise) is - /// 30 seconds. For more information, see Origin Response Timeout in the + /// 30 seconds. For more information, see Response timeout (custom origins only) in the /// Amazon CloudFront Developer Guide. public let originReadTimeout: Int? /// Specifies the minimum SSL/TLS protocol that CloudFront uses when connecting to your origin @@ -3495,17 +3495,21 @@ extension CloudFront { /// PathPattern in CacheBehavior elements. You must create /// exactly one default cache behavior. public let defaultCacheBehavior: DefaultCacheBehavior - /// The object that you want CloudFront to request from your origin (for example, - /// index.html) when a viewer requests the root URL for your distribution - /// (https://www.example.com) instead of an object in your distribution - /// (https://www.example.com/product-description.html). Specifying a - /// default root object avoids exposing the contents of your distribution. Specify only the object name, for example, index.html. Don't add a - /// / before the object name. If you don't want to specify a default root object when you create a distribution, + /// When a viewer requests the root URL for your distribution, the default root object is the + /// object that you want CloudFront to request from your origin. For example, if your root URL is + /// https://www.example.com, you can specify CloudFront to return the + /// index.html file as the default root object. You can specify a default + /// root object so that viewers see a specific file or object, instead of another object in + /// your distribution (for example, + /// https://www.example.com/product-description.html). A default root + /// object avoids exposing the contents of your distribution. You can specify the object name or a path to the object name (for example, + /// index.html or exampleFolderName/index.html). 
Your string + /// can't begin with a forward slash (/). Only specify the object name or the + /// path to the object. If you don't want to specify a default root object when you create a distribution, /// include an empty DefaultRootObject element. To delete the default root object from an existing distribution, update the /// distribution configuration and include an empty DefaultRootObject /// element. To replace the default root object, update the distribution configuration and specify - /// the new object. For more information about the default root object, see Creating a - /// Default Root Object in the Amazon CloudFront Developer Guide. + /// the new object. For more information about the default root object, see Specify a default root object in the Amazon CloudFront Developer Guide. public let defaultRootObject: String? /// From this field, you can enable or disable the selected distribution. public let enabled: Bool @@ -10882,15 +10886,30 @@ extension CloudFront { } public struct VpcOriginConfig: AWSEncodableShape & AWSDecodableShape { + /// Specifies how long, in seconds, CloudFront persists its connection to the origin. The + /// minimum timeout is 1 second, the maximum is 60 seconds, and the default (if you don't + /// specify otherwise) is 5 seconds. For more information, see Keep-alive timeout (custom origins only) in the + /// Amazon CloudFront Developer Guide. + public let originKeepaliveTimeout: Int? + /// Specifies how long, in seconds, CloudFront waits for a response from the origin. This is + /// also known as the origin response timeout. The minimum timeout is 1 + /// second, the maximum is 60 seconds, and the default (if you don't specify otherwise) is + /// 30 seconds. For more information, see Response timeout (custom origins only) in the + /// Amazon CloudFront Developer Guide. + public let originReadTimeout: Int? /// The VPC origin ID. 
public let vpcOriginId: String @inlinable - public init(vpcOriginId: String) { + public init(originKeepaliveTimeout: Int? = nil, originReadTimeout: Int? = nil, vpcOriginId: String) { + self.originKeepaliveTimeout = originKeepaliveTimeout + self.originReadTimeout = originReadTimeout self.vpcOriginId = vpcOriginId } private enum CodingKeys: String, CodingKey { + case originKeepaliveTimeout = "OriginKeepaliveTimeout" + case originReadTimeout = "OriginReadTimeout" case vpcOriginId = "VpcOriginId" } } diff --git a/Sources/Soto/Services/CloudHSMV2/CloudHSMV2_api.swift b/Sources/Soto/Services/CloudHSMV2/CloudHSMV2_api.swift index 632c22abab..364ecfa9c2 100644 --- a/Sources/Soto/Services/CloudHSMV2/CloudHSMV2_api.swift +++ b/Sources/Soto/Services/CloudHSMV2/CloudHSMV2_api.swift @@ -169,6 +169,7 @@ public struct CloudHSMV2: AWSService { /// - backupRetentionPolicy: A policy that defines how the service retains backups. /// - hsmType: The type of HSM to use in the cluster. The allowed values are hsm1.medium and hsm2m.medium. /// - mode: The mode to use in the cluster. The allowed values are FIPS and NON_FIPS. + /// - networkType: The NetworkType to create a cluster with. The allowed values are IPV4 and DUALSTACK. /// - sourceBackupId: The identifier (ID) or the Amazon Resource Name (ARN) of the cluster backup to restore. Use this value to restore the cluster from a backup instead of creating a new cluster. To find the backup ID or ARN, use DescribeBackups. If using a backup in another account, the full ARN must be supplied. /// - subnetIds: The identifiers (IDs) of the subnets where you are creating the cluster. You must specify at least one subnet. If you specify multiple subnets, they must meet the following criteria: All subnets must be in the same virtual private cloud (VPC). You can specify only one subnet per Availability Zone. /// - tagList: Tags to apply to the CloudHSM cluster during creation. 
@@ -178,6 +179,7 @@ public struct CloudHSMV2: AWSService { backupRetentionPolicy: BackupRetentionPolicy? = nil, hsmType: String, mode: ClusterMode? = nil, + networkType: NetworkType? = nil, sourceBackupId: String? = nil, subnetIds: [String], tagList: [Tag]? = nil, @@ -187,6 +189,7 @@ public struct CloudHSMV2: AWSService { backupRetentionPolicy: backupRetentionPolicy, hsmType: hsmType, mode: mode, + networkType: networkType, sourceBackupId: sourceBackupId, subnetIds: subnetIds, tagList: tagList diff --git a/Sources/Soto/Services/CloudHSMV2/CloudHSMV2_shapes.swift b/Sources/Soto/Services/CloudHSMV2/CloudHSMV2_shapes.swift index 1b7d47ec81..ac54a387eb 100644 --- a/Sources/Soto/Services/CloudHSMV2/CloudHSMV2_shapes.swift +++ b/Sources/Soto/Services/CloudHSMV2/CloudHSMV2_shapes.swift @@ -58,6 +58,8 @@ extension CloudHSMV2 { case deleted = "DELETED" case initializeInProgress = "INITIALIZE_IN_PROGRESS" case initialized = "INITIALIZED" + case modifyInProgress = "MODIFY_IN_PROGRESS" + case rollbackInProgress = "ROLLBACK_IN_PROGRESS" case uninitialized = "UNINITIALIZED" case updateInProgress = "UPDATE_IN_PROGRESS" public var description: String { return self.rawValue } @@ -72,6 +74,12 @@ extension CloudHSMV2 { public var description: String { return self.rawValue } } + public enum NetworkType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case dualstack = "DUALSTACK" + case ipv4 = "IPV4" + public var description: String { return self.rawValue } + } + // MARK: Shapes public struct Backup: AWSDecodableShape { @@ -211,6 +219,8 @@ extension CloudHSMV2 { public let hsmType: String? /// The mode of the cluster. public let mode: ClusterMode? + /// The cluster's NetworkType can be set to either IPV4 (which is the default) or DUALSTACK. When set to IPV4, communication between your application and the Hardware Security Modules (HSMs) is restricted to the IPv4 protocol only. 
In contrast, the DUALSTACK network type enables communication over both the IPv4 and IPv6 protocols. To use the DUALSTACK option, you'll need to configure your Virtual Private Cloud (VPC) and subnets to support both IPv4 and IPv6. This involves adding IPv6 Classless Inter-Domain Routing (CIDR) blocks to the existing IPv4 CIDR blocks in your subnets. The choice between IPV4 and DUALSTACK network types determines the flexibility of the network addressing setup for your cluster. The DUALSTACK option provides more flexibility by allowing both IPv4 and IPv6 communication. + public let networkType: NetworkType? /// The default password for the cluster's Pre-Crypto Officer (PRECO) user. public let preCoPassword: String? /// The identifier (ID) of the cluster's security group. @@ -229,7 +239,7 @@ extension CloudHSMV2 { public let vpcId: String? @inlinable - public init(backupPolicy: BackupPolicy? = nil, backupRetentionPolicy: BackupRetentionPolicy? = nil, certificates: Certificates? = nil, clusterId: String? = nil, createTimestamp: Date? = nil, hsms: [Hsm]? = nil, hsmType: String? = nil, mode: ClusterMode? = nil, preCoPassword: String? = nil, securityGroup: String? = nil, sourceBackupId: String? = nil, state: ClusterState? = nil, stateMessage: String? = nil, subnetMapping: [String: String]? = nil, tagList: [Tag]? = nil, vpcId: String? = nil) { + public init(backupPolicy: BackupPolicy? = nil, backupRetentionPolicy: BackupRetentionPolicy? = nil, certificates: Certificates? = nil, clusterId: String? = nil, createTimestamp: Date? = nil, hsms: [Hsm]? = nil, hsmType: String? = nil, mode: ClusterMode? = nil, networkType: NetworkType? = nil, preCoPassword: String? = nil, securityGroup: String? = nil, sourceBackupId: String? = nil, state: ClusterState? = nil, stateMessage: String? = nil, subnetMapping: [String: String]? = nil, tagList: [Tag]? = nil, vpcId: String? 
= nil) { self.backupPolicy = backupPolicy self.backupRetentionPolicy = backupRetentionPolicy self.certificates = certificates @@ -238,6 +248,7 @@ extension CloudHSMV2 { self.hsms = hsms self.hsmType = hsmType self.mode = mode + self.networkType = networkType self.preCoPassword = preCoPassword self.securityGroup = securityGroup self.sourceBackupId = sourceBackupId @@ -257,6 +268,7 @@ extension CloudHSMV2 { case hsms = "Hsms" case hsmType = "HsmType" case mode = "Mode" + case networkType = "NetworkType" case preCoPassword = "PreCoPassword" case securityGroup = "SecurityGroup" case sourceBackupId = "SourceBackupId" @@ -321,6 +333,8 @@ extension CloudHSMV2 { public let hsmType: String /// The mode to use in the cluster. The allowed values are FIPS and NON_FIPS. public let mode: ClusterMode? + /// The NetworkType to create a cluster with. The allowed values are IPV4 and DUALSTACK. + public let networkType: NetworkType? /// The identifier (ID) or the Amazon Resource Name (ARN) of the cluster backup to restore. Use this value to restore the cluster from a backup instead of creating a new cluster. To find the backup ID or ARN, use DescribeBackups. If using a backup in another account, the full ARN must be supplied. public let sourceBackupId: String? /// The identifiers (IDs) of the subnets where you are creating the cluster. You must specify at least one subnet. If you specify multiple subnets, they must meet the following criteria: All subnets must be in the same virtual private cloud (VPC). You can specify only one subnet per Availability Zone. @@ -329,10 +343,11 @@ extension CloudHSMV2 { public let tagList: [Tag]? @inlinable - public init(backupRetentionPolicy: BackupRetentionPolicy? = nil, hsmType: String, mode: ClusterMode? = nil, sourceBackupId: String? = nil, subnetIds: [String], tagList: [Tag]? = nil) { + public init(backupRetentionPolicy: BackupRetentionPolicy? = nil, hsmType: String, mode: ClusterMode? = nil, networkType: NetworkType? 
= nil, sourceBackupId: String? = nil, subnetIds: [String], tagList: [Tag]? = nil) { self.backupRetentionPolicy = backupRetentionPolicy self.hsmType = hsmType self.mode = mode + self.networkType = networkType self.sourceBackupId = sourceBackupId self.subnetIds = subnetIds self.tagList = tagList @@ -359,6 +374,7 @@ extension CloudHSMV2 { case backupRetentionPolicy = "BackupRetentionPolicy" case hsmType = "HsmType" case mode = "Mode" + case networkType = "NetworkType" case sourceBackupId = "SourceBackupId" case subnetIds = "SubnetIds" case tagList = "TagList" @@ -746,6 +762,8 @@ extension CloudHSMV2 { public let eniId: String? /// The IP address of the HSM's elastic network interface (ENI). public let eniIp: String? + /// The IPv6 address (if any) of the HSM's elastic network interface (ENI). + public let eniIpV6: String? /// The HSM's identifier (ID). public let hsmId: String /// The HSM's state. @@ -756,11 +774,12 @@ extension CloudHSMV2 { public let subnetId: String? @inlinable - public init(availabilityZone: String? = nil, clusterId: String? = nil, eniId: String? = nil, eniIp: String? = nil, hsmId: String, state: HsmState? = nil, stateMessage: String? = nil, subnetId: String? = nil) { + public init(availabilityZone: String? = nil, clusterId: String? = nil, eniId: String? = nil, eniIp: String? = nil, eniIpV6: String? = nil, hsmId: String, state: HsmState? = nil, stateMessage: String? = nil, subnetId: String? 
= nil) { self.availabilityZone = availabilityZone self.clusterId = clusterId self.eniId = eniId self.eniIp = eniIp + self.eniIpV6 = eniIpV6 self.hsmId = hsmId self.state = state self.stateMessage = stateMessage @@ -772,6 +791,7 @@ extension CloudHSMV2 { case clusterId = "ClusterId" case eniId = "EniId" case eniIp = "EniIp" + case eniIpV6 = "EniIpV6" case hsmId = "HsmId" case state = "State" case stateMessage = "StateMessage" @@ -1119,6 +1139,7 @@ public struct CloudHSMV2ErrorType: AWSErrorType { case cloudHsmAccessDeniedException = "CloudHsmAccessDeniedException" case cloudHsmInternalFailureException = "CloudHsmInternalFailureException" case cloudHsmInvalidRequestException = "CloudHsmInvalidRequestException" + case cloudHsmResourceLimitExceededException = "CloudHsmResourceLimitExceededException" case cloudHsmResourceNotFoundException = "CloudHsmResourceNotFoundException" case cloudHsmServiceException = "CloudHsmServiceException" case cloudHsmTagException = "CloudHsmTagException" @@ -1148,6 +1169,8 @@ public struct CloudHSMV2ErrorType: AWSErrorType { public static var cloudHsmInternalFailureException: Self { .init(.cloudHsmInternalFailureException) } /// The request was rejected because it is not a valid request. public static var cloudHsmInvalidRequestException: Self { .init(.cloudHsmInvalidRequestException) } + /// The request was rejected because it exceeds an CloudHSM limit. + public static var cloudHsmResourceLimitExceededException: Self { .init(.cloudHsmResourceLimitExceededException) } /// The request was rejected because it refers to a resource that cannot be found. public static var cloudHsmResourceNotFoundException: Self { .init(.cloudHsmResourceNotFoundException) } /// The request was rejected because an error occurred. 
diff --git a/Sources/Soto/Services/CloudTrail/CloudTrail_shapes.swift b/Sources/Soto/Services/CloudTrail/CloudTrail_shapes.swift index 708ca79b5e..292de6c06e 100644 --- a/Sources/Soto/Services/CloudTrail/CloudTrail_shapes.swift +++ b/Sources/Soto/Services/CloudTrail/CloudTrail_shapes.swift @@ -221,7 +221,7 @@ extension CloudTrail { public let endsWith: [String]? /// An operator that includes events that match the exact value of the event record field specified as the value of Field. This is the only valid operator that you can use with the readOnly, eventCategory, and resources.type fields. public let equals: [String]? - /// A field in a CloudTrail event record on which to filter events to be logged. For event data stores for CloudTrail Insights events, Config configuration items, Audit Manager evidence, or events outside of Amazon Web Services, the field is used only for selecting events as filtering is not supported. For CloudTrail management events, supported fields include eventCategory (required), eventSource, and readOnly. The following additional fields are available for event data stores: eventName, eventType, sessionCredentialFromConsole, and userIdentity.arn. For CloudTrail data events, supported fields include eventCategory (required), resources.type (required), eventName, readOnly, and resources.ARN. The following additional fields are available for event data stores: eventSource, eventType, sessionCredentialFromConsole, and userIdentity.arn. For CloudTrail network activity events, supported fields include eventCategory (required), eventSource (required), eventName, errorCode, and vpcEndpointId. For event data stores for CloudTrail Insights events, Config configuration items, Audit Manager evidence, or events outside of Amazon Web Services, the only supported field is eventCategory. readOnly - This is an optional field that is only used for management events and data events. This field can be set to Equals with a value of true or false. 
If you do not add this field, CloudTrail logs both read and write events. A value of true logs only read events. A value of false logs only write events. eventSource - This field is only used for management events, data events (for event data stores only), and network activity events. For management events for trails, this is an optional field that can be set to NotEquals kms.amazonaws.com to exclude KMS management events, or NotEquals rdsdata.amazonaws.com to exclude RDS management events. For management and data events for event data stores, you can use it to include or exclude any event source and can use any operator. For network activity events, this is a required field that only uses the Equals operator. Set this field to the event source for which you want to log network activity events. If you want to log network activity events for multiple event sources, you must create a separate field selector for each event source. The following are valid values for network activity events: cloudtrail.amazonaws.com ec2.amazonaws.com kms.amazonaws.com secretsmanager.amazonaws.com eventName - This is an optional field that is only used for data events, management events (for event data stores only), and network activity events. You can use any operator with eventName. You can use it to filter in or filter out specific events. You can have multiple values for this field, separated by commas. eventCategory - This field is required and must be set to Equals. For CloudTrail management events, the value must be Management. For CloudTrail data events, the value must be Data. For CloudTrail network activity events, the value must be NetworkActivity. The following are used only for event data stores: For CloudTrail Insights events, the value must be Insight. For Config configuration items, the value must be ConfigurationItem. For Audit Manager evidence, the value must be Evidence. For events outside of Amazon Web Services, the value must be ActivityAuditLog. 
eventType - This is an optional field available only for event data stores, which is used to filter management and data events on the event type. For information about available event types, see CloudTrail record contents in the CloudTrail user guide. errorCode - This field is only used to filter CloudTrail network activity events and is optional. This is the error code to filter on. Currently, the only valid errorCode is VpceAccessDenied. errorCode can only use the Equals operator. sessionCredentialFromConsole - This is an optional field available only for event data stores, which is used to filter management and data events based on whether the events originated from an Amazon Web Services Management Console session. sessionCredentialFromConsole can only use the Equals and NotEquals operators. resources.type - This field is required for CloudTrail data events. resources.type can only use the Equals operator. For a list of available resource types for data events, see Data events in the CloudTrail User Guide. You can have only one resources.type field per selector. To log events on more than one resource type, add another selector. resources.ARN - The resources.ARN is an optional field for data events. You can use any operator with resources.ARN, but if you use Equals or NotEquals, the value must exactly match the ARN of a valid resource of the type you've specified in the template as the value of resources.type. To log all data events for all objects in a specific S3 bucket, use the StartsWith operator, and include only the bucket ARN as the matching value. For information about filtering data events on the resources.ARN field, see Filtering data events by resources.ARN in the CloudTrail User Guide. You can't use the resources.ARN field to filter resource types that do not have ARNs. userIdentity.arn - This is an optional field available only for event data stores, which is used to filter management and data events on the userIdentity ARN. 
You can use any operator with userIdentity.arn. For more information on the userIdentity element, see CloudTrail userIdentity element in the CloudTrail User Guide. vpcEndpointId - This field is only used to filter CloudTrail network activity events and is optional. This field identifies the VPC endpoint that the request passed through. You can use any operator with vpcEndpointId. + /// A field in a CloudTrail event record on which to filter events to be logged. For event data stores for CloudTrail Insights events, Config configuration items, Audit Manager evidence, or events outside of Amazon Web Services, the field is used only for selecting events as filtering is not supported. For more information, see AdvancedFieldSelector in the CloudTrailUser Guide. public let field: String /// An operator that excludes events that match the last few characters of the event record field specified as the value of Field. public let notEndsWith: [String]? diff --git a/Sources/Soto/Services/CloudWatchLogs/CloudWatchLogs_shapes.swift b/Sources/Soto/Services/CloudWatchLogs/CloudWatchLogs_shapes.swift index cecddd447d..a32d8975c1 100644 --- a/Sources/Soto/Services/CloudWatchLogs/CloudWatchLogs_shapes.swift +++ b/Sources/Soto/Services/CloudWatchLogs/CloudWatchLogs_shapes.swift @@ -1234,7 +1234,7 @@ extension CloudWatchLogs { } public func validate(name: String) throws { - try self.validate(self.integrationName, name: "integrationName", parent: name, max: 256) + try self.validate(self.integrationName, name: "integrationName", parent: name, max: 50) try self.validate(self.integrationName, name: "integrationName", parent: name, min: 1) try self.validate(self.integrationName, name: "integrationName", parent: name, pattern: "^[\\.\\-_/#A-Za-z0-9]+$") } @@ -3000,7 +3000,7 @@ extension CloudWatchLogs { } public func validate(name: String) throws { - try self.validate(self.integrationName, name: "integrationName", parent: name, max: 256) + try self.validate(self.integrationName, name: 
"integrationName", parent: name, max: 50) try self.validate(self.integrationName, name: "integrationName", parent: name, min: 1) try self.validate(self.integrationName, name: "integrationName", parent: name, pattern: "^[\\.\\-_/#A-Za-z0-9]+$") } @@ -3523,7 +3523,7 @@ extension CloudWatchLogs { } public func validate(name: String) throws { - try self.validate(self.integrationNamePrefix, name: "integrationNamePrefix", parent: name, max: 256) + try self.validate(self.integrationNamePrefix, name: "integrationNamePrefix", parent: name, max: 50) try self.validate(self.integrationNamePrefix, name: "integrationNamePrefix", parent: name, min: 1) try self.validate(self.integrationNamePrefix, name: "integrationNamePrefix", parent: name, pattern: "^[\\.\\-_/#A-Za-z0-9]+$") } @@ -5166,7 +5166,7 @@ extension CloudWatchLogs { } public func validate(name: String) throws { - try self.validate(self.integrationName, name: "integrationName", parent: name, max: 256) + try self.validate(self.integrationName, name: "integrationName", parent: name, max: 50) try self.validate(self.integrationName, name: "integrationName", parent: name, min: 1) try self.validate(self.integrationName, name: "integrationName", parent: name, pattern: "^[\\.\\-_/#A-Za-z0-9]+$") try self.resourceConfig.validate(name: "\(name).resourceConfig") diff --git a/Sources/Soto/Services/CodePipeline/CodePipeline_api.swift b/Sources/Soto/Services/CodePipeline/CodePipeline_api.swift index 6014b460b4..ff0760b267 100644 --- a/Sources/Soto/Services/CodePipeline/CodePipeline_api.swift +++ b/Sources/Soto/Services/CodePipeline/CodePipeline_api.swift @@ -454,7 +454,7 @@ public struct CodePipeline: AWSService { /// Returns information about an action type created for an external provider, where the action is to be used by customers of the external provider. The action can be created with any supported integration model. /// /// Parameters: - /// - category: Defines what kind of action can be taken in the stage. 
The following are the valid values: Source Build Test Deploy Approval Invoke + /// - category: Defines what kind of action can be taken in the stage. The following are the valid values: Source Build Test Deploy Approval Invoke Compute /// - owner: The creator of an action type that was created with any supported integration model. There are two valid values: AWS and ThirdParty. /// - provider: The provider of the action type being called. The provider name is specified when the action type is created. /// - version: A string that describes the action type version. @@ -811,7 +811,7 @@ public struct CodePipeline: AWSService { return try await self.listRuleExecutions(input, logger: logger) } - /// Lists the rules for the condition. + /// Lists the rules for the condition. For more information about conditions, see Stage conditions. For more information about rules, see the CodePipeline rule reference. @Sendable @inlinable public func listRuleTypes(_ input: ListRuleTypesInput, logger: Logger = AWSClient.loggingDisabled) async throws -> ListRuleTypesOutput { @@ -824,7 +824,7 @@ public struct CodePipeline: AWSService { logger: logger ) } - /// Lists the rules for the condition. + /// Lists the rules for the condition. For more information about conditions, see Stage conditions. For more information about rules, see the CodePipeline rule reference. /// /// Parameters: /// - regionFilter: The rule Region to filter on. diff --git a/Sources/Soto/Services/CodePipeline/CodePipeline_shapes.swift b/Sources/Soto/Services/CodePipeline/CodePipeline_shapes.swift index 3b66997c3b..4448ae7423 100644 --- a/Sources/Soto/Services/CodePipeline/CodePipeline_shapes.swift +++ b/Sources/Soto/Services/CodePipeline/CodePipeline_shapes.swift @@ -976,7 +976,7 @@ extension CodePipeline { } public struct ActionTypeId: AWSEncodableShape & AWSDecodableShape { - /// A category defines what kind of action can be taken in the stage, and constrains the provider type for the action. 
Valid categories are limited to one of the following values. Source Build Test Deploy Invoke Approval + /// A category defines what kind of action can be taken in the stage, and constrains the provider type for the action. Valid categories are limited to one of the following values. Source Build Test Deploy Invoke Approval Compute public let category: ActionCategory /// The creator of the action being called. There are three valid values for the Owner field in the action category section within your pipeline structure: AWS, ThirdParty, and Custom. For more information, see Valid Action Types and Providers in CodePipeline. public let owner: ActionOwner @@ -1960,7 +1960,7 @@ extension CodePipeline { } public struct GetActionTypeInput: AWSEncodableShape { - /// Defines what kind of action can be taken in the stage. The following are the valid values: Source Build Test Deploy Approval Invoke + /// Defines what kind of action can be taken in the stage. The following are the valid values: Source Build Test Deploy Approval Invoke Compute public let category: ActionCategory /// The creator of an action type that was created with any supported integration model. There are two valid values: AWS and ThirdParty. public let owner: String @@ -4056,11 +4056,13 @@ extension CodePipeline { } public struct RuleDeclaration: AWSEncodableShape & AWSDecodableShape { + /// The shell commands to run with your commands rule in CodePipeline. All commands are supported except multi-line formats. While CodeBuild logs and permissions are used, you do not need to create any resources in CodeBuild. Using compute time for this action will incur separate charges in CodeBuild. + public let commands: [String]? /// The action configuration fields for the rule. public let configuration: [String: String]? /// The input artifacts fields for the rule, such as specifying an input file for the rule. public let inputArtifacts: [InputArtifact]? 
- /// The name of the rule that is created for the condition, such as CheckAllResults. + /// The name of the rule that is created for the condition, such as VariableCheck. public let name: String /// The Region for the condition associated with the rule. public let region: String? @@ -4072,7 +4074,8 @@ extension CodePipeline { public let timeoutInMinutes: Int? @inlinable - public init(configuration: [String: String]? = nil, inputArtifacts: [InputArtifact]? = nil, name: String, region: String? = nil, roleArn: String? = nil, ruleTypeId: RuleTypeId, timeoutInMinutes: Int? = nil) { + public init(commands: [String]? = nil, configuration: [String: String]? = nil, inputArtifacts: [InputArtifact]? = nil, name: String, region: String? = nil, roleArn: String? = nil, ruleTypeId: RuleTypeId, timeoutInMinutes: Int? = nil) { + self.commands = commands self.configuration = configuration self.inputArtifacts = inputArtifacts self.name = name @@ -4083,6 +4086,12 @@ extension CodePipeline { } public func validate(name: String) throws { + try self.commands?.forEach { + try validate($0, name: "commands[]", parent: name, max: 1000) + try validate($0, name: "commands[]", parent: name, min: 1) + } + try self.validate(self.commands, name: "commands", parent: name, max: 50) + try self.validate(self.commands, name: "commands", parent: name, min: 1) try self.configuration?.forEach { try validate($0.key, name: "configuration.key", parent: name, max: 50) try validate($0.key, name: "configuration.key", parent: name, min: 1) @@ -4106,6 +4115,7 @@ extension CodePipeline { } private enum CodingKeys: String, CodingKey { + case commands = "commands" case configuration = "configuration" case inputArtifacts = "inputArtifacts" case name = "name" diff --git a/Sources/Soto/Services/CognitoIdentity/CognitoIdentity_api.swift b/Sources/Soto/Services/CognitoIdentity/CognitoIdentity_api.swift index 9d327a0cd9..9cd707489a 100644 --- a/Sources/Soto/Services/CognitoIdentity/CognitoIdentity_api.swift +++ 
b/Sources/Soto/Services/CognitoIdentity/CognitoIdentity_api.swift @@ -81,6 +81,46 @@ public struct CognitoIdentity: AWSService { /// FIPS and dualstack endpoints static var variantEndpoints: [EndpointVariantType: AWSServiceConfig.EndpointVariant] {[ + [.dualstack]: .init(endpoints: [ + "af-south-1": "cognito-identity.af-south-1.amazonaws.com", + "ap-east-1": "cognito-identity.ap-east-1.amazonaws.com", + "ap-northeast-1": "cognito-identity.ap-northeast-1.amazonaws.com", + "ap-northeast-2": "cognito-identity.ap-northeast-2.amazonaws.com", + "ap-northeast-3": "cognito-identity.ap-northeast-3.amazonaws.com", + "ap-south-1": "cognito-identity.ap-south-1.amazonaws.com", + "ap-south-2": "cognito-identity.ap-south-2.amazonaws.com", + "ap-southeast-1": "cognito-identity.ap-southeast-1.amazonaws.com", + "ap-southeast-2": "cognito-identity.ap-southeast-2.amazonaws.com", + "ap-southeast-3": "cognito-identity.ap-southeast-3.amazonaws.com", + "ap-southeast-4": "cognito-identity.ap-southeast-4.amazonaws.com", + "ca-central-1": "cognito-identity.ca-central-1.amazonaws.com", + "ca-west-1": "cognito-identity.ca-west-1.amazonaws.com", + "cn-north-1": "cognito-identity.cn-north-1.amazonaws.com.cn", + "eu-central-1": "cognito-identity.eu-central-1.amazonaws.com", + "eu-central-2": "cognito-identity.eu-central-2.amazonaws.com", + "eu-north-1": "cognito-identity.eu-north-1.amazonaws.com", + "eu-south-1": "cognito-identity.eu-south-1.amazonaws.com", + "eu-south-2": "cognito-identity.eu-south-2.amazonaws.com", + "eu-west-1": "cognito-identity.eu-west-1.amazonaws.com", + "eu-west-2": "cognito-identity.eu-west-2.amazonaws.com", + "eu-west-3": "cognito-identity.eu-west-3.amazonaws.com", + "il-central-1": "cognito-identity.il-central-1.amazonaws.com", + "me-central-1": "cognito-identity.me-central-1.amazonaws.com", + "me-south-1": "cognito-identity.me-south-1.amazonaws.com", + "sa-east-1": "cognito-identity.sa-east-1.amazonaws.com", + "us-east-1": "cognito-identity.us-east-1.amazonaws.com", + 
"us-east-2": "cognito-identity.us-east-2.amazonaws.com", + "us-gov-west-1": "cognito-identity.us-gov-west-1.amazonaws.com", + "us-west-1": "cognito-identity.us-west-1.amazonaws.com", + "us-west-2": "cognito-identity.us-west-2.amazonaws.com" + ]), + [.dualstack, .fips]: .init(endpoints: [ + "us-east-1": "cognito-identity-fips.us-east-1.amazonaws.com", + "us-east-2": "cognito-identity-fips.us-east-2.amazonaws.com", + "us-gov-west-1": "cognito-identity-fips.us-gov-west-1.amazonaws.com", + "us-west-1": "cognito-identity-fips.us-west-1.amazonaws.com", + "us-west-2": "cognito-identity-fips.us-west-2.amazonaws.com" + ]), [.fips]: .init(endpoints: [ "us-east-1": "cognito-identity-fips.us-east-1.amazonaws.com", "us-east-2": "cognito-identity-fips.us-east-2.amazonaws.com", diff --git a/Sources/Soto/Services/CognitoIdentityProvider/CognitoIdentityProvider_api.swift b/Sources/Soto/Services/CognitoIdentityProvider/CognitoIdentityProvider_api.swift index 8aa8edfdfb..adfee537cd 100644 --- a/Sources/Soto/Services/CognitoIdentityProvider/CognitoIdentityProvider_api.swift +++ b/Sources/Soto/Services/CognitoIdentityProvider/CognitoIdentityProvider_api.swift @@ -25,7 +25,7 @@ import Foundation /// Service object for interacting with AWS CognitoIdentityProvider service. /// -/// With the Amazon Cognito user pools API, you can configure user pools and authenticate users. To authenticate users from third-party identity providers (IdPs) in this API, you can link IdP users to native user profiles. Learn more about the authentication and authorization of federated users at Adding user pool sign-in through a third party and in the User pool federation endpoints and hosted UI reference. This API reference provides detailed information about API operations and object types in Amazon Cognito. Along with resource management operations, the Amazon Cognito user pools API includes classes of operations and authorization models for client-side and server-side authentication of users. 
You can interact with operations in the Amazon Cognito user pools API as any of the following subjects. An administrator who wants to configure user pools, app clients, users, groups, or other user pool functions. A server-side app, like a web application, that wants to use its Amazon Web Services privileges to manage, authenticate, or authorize a user. A client-side app, like a mobile app, that wants to make unauthenticated requests to manage, authenticate, or authorize a user. For more information, see Using the Amazon Cognito user pools API and user pool endpoints in the Amazon Cognito Developer Guide. With your Amazon Web Services SDK, you can build the logic to support operational flows in every use case for this API. You can also make direct REST API requests to Amazon Cognito user pools service endpoints. The following links can get you started with the CognitoIdentityProvider client in other supported Amazon Web Services SDKs. Amazon Web Services Command Line Interface Amazon Web Services SDK for .NET Amazon Web Services SDK for C++ Amazon Web Services SDK for Go Amazon Web Services SDK for Java V2 Amazon Web Services SDK for JavaScript Amazon Web Services SDK for PHP V3 Amazon Web Services SDK for Python Amazon Web Services SDK for Ruby V3 To get started with an Amazon Web Services SDK, see Tools to Build on Amazon Web Services. For example actions and scenarios, see Code examples for Amazon Cognito Identity Provider using Amazon Web Services SDKs. +/// With the Amazon Cognito user pools API, you can configure user pools and authenticate users. To authenticate users from third-party identity providers (IdPs) in this API, you can link IdP users to native user profiles. Learn more about the authentication and authorization of federated users at Adding user pool sign-in through a third party and in the User pool federation endpoints and hosted UI reference. 
This API reference provides detailed information about API operations and object types in Amazon Cognito. Along with resource management operations, the Amazon Cognito user pools API includes classes of operations and authorization models for client-side and server-side authentication of users. You can interact with operations in the Amazon Cognito user pools API as any of the following subjects. An administrator who wants to configure user pools, app clients, users, groups, or other user pool functions. A server-side app, like a web application, that wants to use its Amazon Web Services privileges to manage, authenticate, or authorize a user. A client-side app, like a mobile app, that wants to make unauthenticated requests to manage, authenticate, or authorize a user. For more information, see Using the Amazon Cognito user pools API and user pool endpoints in the Amazon Cognito Developer Guide. With your Amazon Web Services SDK, you can build the logic to support operational flows in every use case for this API. You can also make direct REST API requests to Amazon Cognito user pools service endpoints. The following links can get you started with the CognitoIdentityProvider client in other supported Amazon Web Services SDKs. Amazon Web Services Command Line Interface Amazon Web Services SDK for .NET Amazon Web Services SDK for C++ Amazon Web Services SDK for Go Amazon Web Services SDK for Java V2 Amazon Web Services SDK for JavaScript Amazon Web Services SDK for PHP V3 Amazon Web Services SDK for Python Amazon Web Services SDK for Ruby V3 Amazon Web Services SDK for Kotlin To get started with an Amazon Web Services SDK, see Tools to Build on Amazon Web Services. For example actions and scenarios, see Code examples for Amazon Cognito Identity Provider using Amazon Web Services SDKs. 
public struct CognitoIdentityProvider: AWSService { // MARK: Member variables @@ -92,7 +92,7 @@ public struct CognitoIdentityProvider: AWSService { // MARK: API Calls - /// Adds additional user attributes to the user pool schema. Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy. Learn more Signing Amazon Web Services API Requests Using the Amazon Cognito user pools API and user pool endpoints + /// Adds additional user attributes to the user pool schema. Custom attributes can be mutable or immutable and have a custom: or dev: prefix. For more information, see Custom attributes. You can also create custom attributes in the Schema parameter of CreateUserPool and UpdateUserPool. You can't delete custom attributes after you create them. Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy. Learn more Signing Amazon Web Services API Requests Using the Amazon Cognito user pools API and user pool endpoints @Sendable @inlinable public func addCustomAttributes(_ input: AddCustomAttributesRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> AddCustomAttributesResponse { @@ -105,11 +105,11 @@ public struct CognitoIdentityProvider: AWSService { logger: logger ) } - /// Adds additional user attributes to the user pool schema. Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy. 
Learn more Signing Amazon Web Services API Requests Using the Amazon Cognito user pools API and user pool endpoints + /// Adds additional user attributes to the user pool schema. Custom attributes can be mutable or immutable and have a custom: or dev: prefix. For more information, see Custom attributes. You can also create custom attributes in the Schema parameter of CreateUserPool and UpdateUserPool. You can't delete custom attributes after you create them. Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy. Learn more Signing Amazon Web Services API Requests Using the Amazon Cognito user pools API and user pool endpoints /// /// Parameters: - /// - customAttributes: An array of custom attributes, such as Mutable and Name. - /// - userPoolId: The user pool ID for the user pool where you want to add custom attributes. + /// - customAttributes: An array of custom attribute names and other properties. Sets the following characteristics: AttributeDataType The expected data type. Can be a string, a number, a date and time, or a boolean. Mutable If true, you can grant app clients write access to the attribute value. If false, the attribute value can only be set up on sign-up or administrator creation of users. Name The attribute name. For an attribute like custom:myAttribute, enter myAttribute for this field. Required When true, users who sign up or are created must set a value for the attribute. NumberAttributeConstraints The minimum and maximum length of accepted values for a Number-type attribute. StringAttributeConstraints The minimum and maximum length of accepted values for a String-type attribute. DeveloperOnlyAttribute This legacy option creates an attribute with a dev: prefix. 
You can only set the value of a developer-only attribute with administrative IAM credentials. + /// - userPoolId: The ID of the user pool where you want to add custom attributes. /// - logger: Logger use during operation @inlinable public func addCustomAttributes( @@ -142,7 +142,7 @@ public struct CognitoIdentityProvider: AWSService { /// Parameters: /// - groupName: The name of the group that you want to add your user to. /// - username: The username of the user that you want to query or modify. The value of this parameter is typically your user's username, but it can be any of their alias attributes. If username isn't an alias attribute in your user pool, this value must be the sub of a local user or the username of a user from a third-party IdP. - /// - userPoolId: The user pool ID for the user pool. + /// - userPoolId: The ID of the user pool that contains the group that you want to add the user to. /// - logger: Logger use during operation @inlinable public func adminAddUserToGroup( @@ -159,7 +159,7 @@ public struct CognitoIdentityProvider: AWSService { return try await self.adminAddUserToGroup(input, logger: logger) } - /// This IAM-authenticated API operation confirms user sign-up as an administrator. Unlike ConfirmSignUp, your IAM credentials authorize user account confirmation. No confirmation code is required. This request sets a user account active in a user pool that requires confirmation of new user accounts before they can sign in. You can configure your user pool to not send confirmation codes to new users and instead confirm them with this API operation on the back end. Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy. 
Learn more Signing Amazon Web Services API Requests Using the Amazon Cognito user pools API and user pool endpoints + /// Confirms user sign-up as an administrator. Unlike ConfirmSignUp, your IAM credentials authorize user account confirmation. No confirmation code is required. This request sets a user account active in a user pool that requires confirmation of new user accounts before they can sign in. You can configure your user pool to not send confirmation codes to new users and instead confirm them with this API operation on the back end. Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy. Learn more Signing Amazon Web Services API Requests Using the Amazon Cognito user pools API and user pool endpoints To configure your user pool to require administrative confirmation of users, set AllowAdminCreateUserOnly to true in a CreateUserPool or UpdateUserPool request. @Sendable @inlinable public func adminConfirmSignUp(_ input: AdminConfirmSignUpRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> AdminConfirmSignUpResponse { @@ -172,12 +172,12 @@ public struct CognitoIdentityProvider: AWSService { logger: logger ) } - /// This IAM-authenticated API operation confirms user sign-up as an administrator. Unlike ConfirmSignUp, your IAM credentials authorize user account confirmation. No confirmation code is required. This request sets a user account active in a user pool that requires confirmation of new user accounts before they can sign in. You can configure your user pool to not send confirmation codes to new users and instead confirm them with this API operation on the back end. Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. 
For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy. Learn more Signing Amazon Web Services API Requests Using the Amazon Cognito user pools API and user pool endpoints + /// Confirms user sign-up as an administrator. Unlike ConfirmSignUp, your IAM credentials authorize user account confirmation. No confirmation code is required. This request sets a user account active in a user pool that requires confirmation of new user accounts before they can sign in. You can configure your user pool to not send confirmation codes to new users and instead confirm them with this API operation on the back end. Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy. Learn more Signing Amazon Web Services API Requests Using the Amazon Cognito user pools API and user pool endpoints To configure your user pool to require administrative confirmation of users, set AllowAdminCreateUserOnly to true in a CreateUserPool or UpdateUserPool request. /// /// Parameters: /// - clientMetadata: A map of custom key-value pairs that you can provide as input for any custom workflows that this action triggers. If your user pool configuration includes triggers, the AdminConfirmSignUp API action invokes the Lambda function that is specified for the post confirmation trigger. When Amazon Cognito invokes this function, it passes a JSON payload, which the function receives as input. In this payload, the clientMetadata attribute provides the data that you assigned to the ClientMetadata parameter in your AdminConfirmSignUp request. In your function code in Lambda, you can process the ClientMetadata value to enhance your workflow for your specific needs. 
For more information, see /// - username: The username of the user that you want to query or modify. The value of this parameter is typically your user's username, but it can be any of their alias attributes. If username isn't an alias attribute in your user pool, this value must be the sub of a local user or the username of a user from a third-party IdP. - /// - userPoolId: The user pool ID for which you want to confirm user registration. + /// - userPoolId: The ID of the user pool where you want to confirm a user's sign-up request. /// - logger: Logger use during operation @inlinable public func adminConfirmSignUp( @@ -210,14 +210,14 @@ public struct CognitoIdentityProvider: AWSService { /// Creates a new user in the specified user pool. If MessageAction isn't set, the default is to send a welcome message via email or phone (SMS). This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in. If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Services service, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode , you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide. This message is based on a template that you configured in your call to create or update a user pool. 
This template includes your custom sign-up instructions and placeholders for user name and temporary password. Alternatively, you can call AdminCreateUser with SUPPRESS for the MessageAction parameter, and Amazon Cognito won't send any email. In either case, if the user has a password, they will be in the FORCE_CHANGE_PASSWORD state until they sign in and set their password. Your invitation message template must have the {####} password placeholder if your users have passwords. If your template doesn't have this placeholder, Amazon Cognito doesn't deliver the invitation message. In this case, you must update your message template and resend the password with a new AdminCreateUser request with a MessageAction value of RESEND. Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy. Learn more Signing Amazon Web Services API Requests Using the Amazon Cognito user pools API and user pool endpoints /// /// Parameters: - /// - clientMetadata: A map of custom key-value pairs that you can provide as input for any custom workflows that this action triggers. You create custom workflows by assigning Lambda functions to user pool triggers. When you use the AdminCreateUser API action, Amazon Cognito invokes the function that is assigned to the pre sign-up trigger. When Amazon Cognito invokes this function, it passes a JSON payload, which the function receives as input. This payload contains a clientMetadata attribute, which provides the data that you assigned to the ClientMetadata parameter in your AdminCreateUser request. In your function code in Lambda, you can process the clientMetadata value to enhance your workflow for your specific needs. For more information, see - /// - desiredDeliveryMediums: Specify "EMAIL" if email will be used to send the welcome message. 
Specify "SMS" if the phone number will be used. The default value is "SMS". You can specify more than one value. - /// - forceAliasCreation: This parameter is used only if the phone_number_verified or email_verified attribute is set to True. Otherwise, it is ignored. If this parameter is set to True and the phone number or email address specified in the UserAttributes parameter already exists as an alias with a different user, the API call will migrate the alias from the previous user to the newly created user. The previous user will no longer be able to log in using that alias. If this parameter is set to False, the API throws an AliasExistsException error if the alias already exists. The default value is False. - /// - messageAction: Set to RESEND to resend the invitation message to a user that already exists and reset the expiration limit on the user's account. Set to SUPPRESS to suppress sending the message. You can specify only one value. + /// - clientMetadata: A map of custom key-value pairs that you can provide as input for any custom workflows that this action triggers. You create custom workflows by assigning Lambda functions to user pool triggers. When you use the AdminCreateUser API action, Amazon Cognito invokes the function that is assigned to the pre sign-up trigger. When Amazon Cognito invokes this function, it passes a JSON payload, which the function receives as input. This payload contains a ClientMetadata attribute, which provides the data that you assigned to the ClientMetadata parameter in your AdminCreateUser request. In your function code in Lambda, you can process the clientMetadata value to enhance your workflow for your specific needs. For more information, see + /// - desiredDeliveryMediums: Specify EMAIL if email will be used to send the welcome message. Specify SMS if the phone number will be used. The default value is SMS. You can specify more than one value. 
+ /// - forceAliasCreation: This parameter is used only if the phone_number_verified or email_verified attribute is set to True. Otherwise, it is ignored. If this parameter is set to True and the phone number or email address specified in the UserAttributes parameter already exists as an alias with a different user, this request migrates the alias from the previous user to the newly-created user. The previous user will no longer be able to log in using that alias. If this parameter is set to False, the API throws an AliasExistsException error if the alias already exists. The default value is False. + /// - messageAction: Set to RESEND to resend the invitation message to a user that already exists, and to reset the temporary-password duration with a new temporary password. Set to SUPPRESS to suppress sending the message. You can specify only one value. /// - temporaryPassword: The user's temporary password. This password must conform to the password policy that you specified when you created the user pool. The exception to the requirement for a password is when your user pool supports passwordless sign-in with email or SMS OTPs. To create a user with no password, omit this parameter or submit a blank value. You can only create a passwordless user when passwordless sign-in is available. See the SignInPolicyType property of CreateUserPool and UpdateUserPool. The temporary password is valid only once. To complete the Admin Create User flow, the user must enter the temporary password in the sign-in page, along with a new password to be used in all future sign-ins. If you don't specify a value, Amazon Cognito generates one for you unless you have passwordless options active for your user pool. The temporary password can only be used until the user account expiration limit that you set for your user pool. To reset the account after that time limit, you must call AdminCreateUser again and specify RESEND for the MessageAction parameter. 
/// - userAttributes: An array of name-value pairs that contain user attributes and attribute values to be set for the user to be created. You can create a user without specifying any attributes other than Username. However, any attributes that you specify as required (when creating a user pool or in the Attributes tab of the console) either you should supply (in your call to AdminCreateUser) or the user should supply (when they sign up in response to your welcome message). For custom attributes, you must prepend the custom: prefix to the attribute name. To send a message inviting the user to sign up, you must specify the user's email address or phone number. You can do this in your call to AdminCreateUser or in the Users tab of the Amazon Cognito console for managing your user pools. You must also provide an email address or phone number when you expect the user to do passwordless sign-in with an email or SMS OTP. These attributes must be provided when passwordless options are the only available, or when you don't submit a TemporaryPassword. In your call to AdminCreateUser, you can set the email_verified attribute to True, and you can set the phone_number_verified attribute to True. You can also do this by calling AdminUpdateUserAttributes. email: The email address of the user to whom the message that contains the code and username will be sent. Required if the email_verified attribute is set to True, or if "EMAIL" is specified in the DesiredDeliveryMediums parameter. phone_number: The phone number of the user to whom the message that contains the code and username will be sent. Required if the phone_number_verified attribute is set to True, or if "SMS" is specified in the DesiredDeliveryMediums parameter. /// - username: The value that you want to set as the username sign-in attribute. The following conditions apply to the username parameter. The username can't be a duplicate of another username in the same user pool. 
You can't change the value of a username after you create it. You can only provide a value if usernames are a valid sign-in attribute for your user pool. If your user pool only supports phone numbers or email addresses as sign-in attributes, Amazon Cognito automatically generates a username value. For more information, see Customizing sign-in attributes. - /// - userPoolId: The user pool ID for the user pool where the user will be created. + /// - userPoolId: The ID of the user pool where you want to create a user. /// - validationData: Temporary user attributes that contribute to the outcomes of your pre sign-up Lambda trigger. This set of key-value pairs are for custom validation of information that you collect from your users but don't need to retain. Your Lambda function can analyze this additional data and act on it. Your function might perform external API operations like logging user attributes and validation data to Amazon CloudWatch Logs. Validation data might also affect the response that your function returns to Amazon Cognito, like automatically confirming the user if they sign up from within your network. For more information about the pre sign-up Lambda trigger, see Pre sign-up Lambda trigger. /// - logger: Logger use during operation @inlinable @@ -247,7 +247,7 @@ public struct CognitoIdentityProvider: AWSService { return try await self.adminCreateUser(input, logger: logger) } - /// Deletes a user as an administrator. Works on any user. Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy. Learn more Signing Amazon Web Services API Requests Using the Amazon Cognito user pools API and user pool endpoints + /// Deletes a user profile in your user pool. Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. 
For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy. Learn more Signing Amazon Web Services API Requests Using the Amazon Cognito user pools API and user pool endpoints @Sendable @inlinable public func adminDeleteUser(_ input: AdminDeleteUserRequest, logger: Logger = AWSClient.loggingDisabled) async throws { @@ -260,11 +260,11 @@ public struct CognitoIdentityProvider: AWSService { logger: logger ) } - /// Deletes a user as an administrator. Works on any user. Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy. Learn more Signing Amazon Web Services API Requests Using the Amazon Cognito user pools API and user pool endpoints + /// Deletes a user profile in your user pool. Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy. Learn more Signing Amazon Web Services API Requests Using the Amazon Cognito user pools API and user pool endpoints /// /// Parameters: /// - username: The username of the user that you want to query or modify. The value of this parameter is typically your user's username, but it can be any of their alias attributes. If username isn't an alias attribute in your user pool, this value must be the sub of a local user or the username of a user from a third-party IdP. - /// - userPoolId: The user pool ID for the user pool where you want to delete the user. + /// - userPoolId: The ID of the user pool where you want to delete the user. 
/// - logger: Logger use during operation @inlinable public func adminDeleteUser( @@ -279,7 +279,7 @@ public struct CognitoIdentityProvider: AWSService { return try await self.adminDeleteUser(input, logger: logger) } - /// Deletes the user attributes in a user pool as an administrator. Works on any user. Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy. Learn more Signing Amazon Web Services API Requests Using the Amazon Cognito user pools API and user pool endpoints + /// Deletes attribute values from a user. This operation doesn't affect tokens for existing user sessions. The next ID token that the user receives will no longer have this attribute. Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy. Learn more Signing Amazon Web Services API Requests Using the Amazon Cognito user pools API and user pool endpoints @Sendable @inlinable public func adminDeleteUserAttributes(_ input: AdminDeleteUserAttributesRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> AdminDeleteUserAttributesResponse { @@ -292,12 +292,12 @@ public struct CognitoIdentityProvider: AWSService { logger: logger ) } - /// Deletes the user attributes in a user pool as an administrator. Works on any user. Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy. 
Learn more Signing Amazon Web Services API Requests Using the Amazon Cognito user pools API and user pool endpoints + /// Deletes attribute values from a user. This operation doesn't affect tokens for existing user sessions. The next ID token that the user receives will no longer have this attribute. Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy. Learn more Signing Amazon Web Services API Requests Using the Amazon Cognito user pools API and user pool endpoints /// /// Parameters: /// - userAttributeNames: An array of strings representing the user attribute names you want to delete. For custom attributes, you must prepend the custom: prefix to the attribute name. /// - username: The username of the user that you want to query or modify. The value of this parameter is typically your user's username, but it can be any of their alias attributes. If username isn't an alias attribute in your user pool, this value must be the sub of a local user or the username of a user from a third-party IdP. - /// - userPoolId: The user pool ID for the user pool where you want to delete user attributes. + /// - userPoolId: The ID of the user pool where you want to delete user attributes. /// - logger: Logger use during operation @inlinable public func adminDeleteUserAttributes( @@ -330,8 +330,8 @@ public struct CognitoIdentityProvider: AWSService { /// Prevents the user from signing in with the specified external (SAML or social) identity provider (IdP). If the user that you want to deactivate is a Amazon Cognito user pools native username + password user, they can't use their password to sign in. If the user to deactivate is a linked external IdP user, any link between that user and an existing user is removed. 
When the external user signs in again, and the user is no longer attached to the previously linked DestinationUser, the user must create a new user account. See AdminLinkProviderForUser. The ProviderName must match the value specified when creating an IdP for the pool. To deactivate a native username + password user, the ProviderName value must be Cognito and the ProviderAttributeName must be Cognito_Subject. The ProviderAttributeValue must be the name that is used in the user pool for the user. The ProviderAttributeName must always be Cognito_Subject for social IdPs. The ProviderAttributeValue must always be the exact subject that was used when the user was originally linked as a source user. For de-linking a SAML identity, there are two scenarios. If the linked identity has not yet been used to sign in, the ProviderAttributeName and ProviderAttributeValue must be the same values that were used for the SourceUser when the identities were originally linked using AdminLinkProviderForUser call. (If the linking was done with ProviderAttributeName set to Cognito_Subject, the same applies here). However, if the user has already signed in, the ProviderAttributeName must be Cognito_Subject and ProviderAttributeValue must be the subject of the SAML assertion. Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy. Learn more Signing Amazon Web Services API Requests Using the Amazon Cognito user pools API and user pool endpoints /// /// Parameters: - /// - user: The user to be disabled. - /// - userPoolId: The user pool ID for the user pool. + /// - user: The user profile that you want to delete a linked identity from. + /// - userPoolId: The ID of the user pool where you want to delete the user's linked identities. 
/// - logger: Logger use during operation @inlinable public func adminDisableProviderForUser( @@ -346,7 +346,7 @@ public struct CognitoIdentityProvider: AWSService { return try await self.adminDisableProviderForUser(input, logger: logger) } - /// Deactivates a user and revokes all access tokens for the user. A deactivated user can't sign in, but still appears in the responses to GetUser and ListUsers API requests. Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy. Learn more Signing Amazon Web Services API Requests Using the Amazon Cognito user pools API and user pool endpoints + /// Deactivates a user profile and revokes all access tokens for the user. A deactivated user can't sign in, but still appears in the responses to ListUsers API requests. Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy. Learn more Signing Amazon Web Services API Requests Using the Amazon Cognito user pools API and user pool endpoints @Sendable @inlinable public func adminDisableUser(_ input: AdminDisableUserRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> AdminDisableUserResponse { @@ -359,11 +359,11 @@ public struct CognitoIdentityProvider: AWSService { logger: logger ) } - /// Deactivates a user and revokes all access tokens for the user. A deactivated user can't sign in, but still appears in the responses to GetUser and ListUsers API requests. Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. 
For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy. Learn more Signing Amazon Web Services API Requests Using the Amazon Cognito user pools API and user pool endpoints + /// Deactivates a user profile and revokes all access tokens for the user. A deactivated user can't sign in, but still appears in the responses to ListUsers API requests. Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy. Learn more Signing Amazon Web Services API Requests Using the Amazon Cognito user pools API and user pool endpoints /// /// Parameters: /// - username: The username of the user that you want to query or modify. The value of this parameter is typically your user's username, but it can be any of their alias attributes. If username isn't an alias attribute in your user pool, this value must be the sub of a local user or the username of a user from a third-party IdP. - /// - userPoolId: The user pool ID for the user pool where you want to disable the user. + /// - userPoolId: The ID of the user pool where you want to disable the user. /// - logger: Logger use during operation @inlinable public func adminDisableUser( @@ -378,7 +378,7 @@ public struct CognitoIdentityProvider: AWSService { return try await self.adminDisableUser(input, logger: logger) } - /// Enables the specified user as an administrator. Works on any user. Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy. 
Learn more Signing Amazon Web Services API Requests Using the Amazon Cognito user pools API and user pool endpoints + /// Activate sign-in for a user profile that previously had sign-in access disabled. Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy. Learn more Signing Amazon Web Services API Requests Using the Amazon Cognito user pools API and user pool endpoints @Sendable @inlinable public func adminEnableUser(_ input: AdminEnableUserRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> AdminEnableUserResponse { @@ -391,11 +391,11 @@ public struct CognitoIdentityProvider: AWSService { logger: logger ) } - /// Enables the specified user as an administrator. Works on any user. Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy. Learn more Signing Amazon Web Services API Requests Using the Amazon Cognito user pools API and user pool endpoints + /// Activate sign-in for a user profile that previously had sign-in access disabled. Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy. Learn more Signing Amazon Web Services API Requests Using the Amazon Cognito user pools API and user pool endpoints /// /// Parameters: /// - username: The username of the user that you want to query or modify. The value of this parameter is typically your user's username, but it can be any of their alias attributes. 
If username isn't an alias attribute in your user pool, this value must be the sub of a local user or the username of a user from a third-party IdP. - /// - userPoolId: The user pool ID for the user pool where you want to enable the user. + /// - userPoolId: The ID of the user pool where you want to activate sign-in for the user. /// - logger: Logger use during operation @inlinable public func adminEnableUser( @@ -410,7 +410,7 @@ public struct CognitoIdentityProvider: AWSService { return try await self.adminEnableUser(input, logger: logger) } - /// Forgets the device, as an administrator. Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy. Learn more Signing Amazon Web Services API Requests Using the Amazon Cognito user pools API and user pool endpoints + /// Forgets, or deletes, a remembered device from a user's profile. After you forget the device, the user can no longer complete device authentication with that device and when applicable, must submit MFA codes again. For more information, see Working with devices. Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy. Learn more Signing Amazon Web Services API Requests Using the Amazon Cognito user pools API and user pool endpoints @Sendable @inlinable public func adminForgetDevice(_ input: AdminForgetDeviceRequest, logger: Logger = AWSClient.loggingDisabled) async throws { @@ -423,12 +423,12 @@ public struct CognitoIdentityProvider: AWSService { logger: logger ) } - /// Forgets the device, as an administrator. Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. 
For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy. Learn more Signing Amazon Web Services API Requests Using the Amazon Cognito user pools API and user pool endpoints + /// Forgets, or deletes, a remembered device from a user's profile. After you forget the device, the user can no longer complete device authentication with that device and when applicable, must submit MFA codes again. For more information, see Working with devices. Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy. Learn more Signing Amazon Web Services API Requests Using the Amazon Cognito user pools API and user pool endpoints /// /// Parameters: - /// - deviceKey: The device key. + /// - deviceKey: The key ID of the device that you want to delete. You can get device keys in the response to an AdminListDevices request. /// - username: The username of the user that you want to query or modify. The value of this parameter is typically your user's username, but it can be any of their alias attributes. If username isn't an alias attribute in your user pool, this value must be the sub of a local user or the username of a user from a third-party IdP. - /// - userPoolId: The user pool ID. + /// - userPoolId: The ID of the user pool where the device owner is a user. /// - logger: Logger use during operation @inlinable public func adminForgetDevice( @@ -445,7 +445,7 @@ public struct CognitoIdentityProvider: AWSService { return try await self.adminForgetDevice(input, logger: logger) } - /// Gets the device, as an administrator. Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. 
For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy. Learn more Signing Amazon Web Services API Requests Using the Amazon Cognito user pools API and user pool endpoints + /// Given the device key, returns details for a user's device. For more information, see Working with devices. Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy. Learn more Signing Amazon Web Services API Requests Using the Amazon Cognito user pools API and user pool endpoints @Sendable @inlinable public func adminGetDevice(_ input: AdminGetDeviceRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> AdminGetDeviceResponse { @@ -458,12 +458,12 @@ public struct CognitoIdentityProvider: AWSService { logger: logger ) } - /// Gets the device, as an administrator. Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy. Learn more Signing Amazon Web Services API Requests Using the Amazon Cognito user pools API and user pool endpoints + /// Given the device key, returns details for a user's device. For more information, see Working with devices. Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy. Learn more Signing Amazon Web Services API Requests Using the Amazon Cognito user pools API and user pool endpoints /// /// Parameters: - /// - deviceKey: The device key.
+ /// - deviceKey: The key of the device that you want to query. You can get device keys in the response to an AdminListDevices request. /// - username: The username of the user that you want to query or modify. The value of this parameter is typically your user's username, but it can be any of their alias attributes. If username isn't an alias attribute in your user pool, this value must be the sub of a local user or the username of a user from a third-party IdP. - /// - userPoolId: The user pool ID. + /// - userPoolId: The ID of the user pool where the device owner is a user. /// - logger: Logger use during operation @inlinable public func adminGetDevice( @@ -480,7 +480,7 @@ public struct CognitoIdentityProvider: AWSService { return try await self.adminGetDevice(input, logger: logger) } - /// Gets the specified user by user name in a user pool as an administrator. Works on any user. This operation contributes to your monthly active user (MAU) count for the purpose of billing. Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy. Learn more Signing Amazon Web Services API Requests Using the Amazon Cognito user pools API and user pool endpoints + /// Given the username, returns details about a user profile in a user pool. This operation contributes to your monthly active user (MAU) count for the purpose of billing. You can specify alias attributes in the Username parameter. Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy.
Learn more Signing Amazon Web Services API Requests Using the Amazon Cognito user pools API and user pool endpoints @Sendable @inlinable public func adminGetUser(_ input: AdminGetUserRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> AdminGetUserResponse { @@ -493,11 +493,11 @@ public struct CognitoIdentityProvider: AWSService { logger: logger ) } - /// Gets the specified user by user name in a user pool as an administrator. Works on any user. This operation contributes to your monthly active user (MAU) count for the purpose of billing. Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy. Learn more Signing Amazon Web Services API Requests Using the Amazon Cognito user pools API and user pool endpoints + /// Given the username, returns details about a user profile in a user pool. This operation contributes to your monthly active user (MAU) count for the purpose of billing. You can specify alias attributes in the Username parameter. Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy. Learn more Signing Amazon Web Services API Requests Using the Amazon Cognito user pools API and user pool endpoints /// /// Parameters: /// - username: The username of the user that you want to query or modify. The value of this parameter is typically your user's username, but it can be any of their alias attributes. If username isn't an alias attribute in your user pool, this value must be the sub of a local user or the username of a user from a third-party IdP. - /// - userPoolId: The user pool ID for the user pool where you want to get information about the user. 
+ /// - userPoolId: The ID of the user pool where you want to get information about the user. /// - logger: Logger use during operation @inlinable public func adminGetUser( @@ -512,7 +512,7 @@ public struct CognitoIdentityProvider: AWSService { return try await self.adminGetUser(input, logger: logger) } - /// Initiates the authentication flow, as an administrator. This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in. If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Services service, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode , you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide. Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy. Learn more Signing Amazon Web Services API Requests Using the Amazon Cognito user pools API and user pool endpoints + /// Starts sign-in for applications with a server-side component, for example a traditional web application. This operation specifies the authentication flow that you'd like to begin. The authentication flow that you specify must be supported in your app client configuration. 
For more information about authentication flows, see Authentication flows. This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in. If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Services service, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode , you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide. Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy. Learn more Signing Amazon Web Services API Requests Using the Amazon Cognito user pools API and user pool endpoints @Sendable @inlinable public func adminInitiateAuth(_ input: AdminInitiateAuthRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> AdminInitiateAuthResponse { @@ -525,17 +525,17 @@ public struct CognitoIdentityProvider: AWSService { logger: logger ) } - /// Initiates the authentication flow, as an administrator. This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. 
If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in. If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Services service, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode , you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide. Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy. Learn more Signing Amazon Web Services API Requests Using the Amazon Cognito user pools API and user pool endpoints + /// Starts sign-in for applications with a server-side component, for example a traditional web application. This operation specifies the authentication flow that you'd like to begin. The authentication flow that you specify must be supported in your app client configuration. For more information about authentication flows, see Authentication flows. This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in. 
If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Services service, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode , you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide. Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy. Learn more Signing Amazon Web Services API Requests Using the Amazon Cognito user pools API and user pool endpoints /// /// Parameters: - /// - analyticsMetadata: The analytics metadata for collecting Amazon Pinpoint metrics for AdminInitiateAuth calls. - /// - authFlow: The authentication flow that you want to initiate. The AuthParameters that you must submit are linked to the flow that you submit. For example: USER_AUTH: Request a preferred authentication type or review available authentication types. From the offered authentication types, select one in a challenge response and then authenticate with that method in an additional challenge response. REFRESH_TOKEN_AUTH: Receive new ID and access tokens when you pass a REFRESH_TOKEN parameter with a valid refresh token as the value. USER_SRP_AUTH: Receive secure remote password (SRP) variables for the next challenge, PASSWORD_VERIFIER, when you pass USERNAME and SRP_A parameters.. ADMIN_USER_PASSWORD_AUTH: Receive new tokens or the next challenge, for example SOFTWARE_TOKEN_MFA, when you pass USERNAME and PASSWORD parameters. Valid values include the following: USER_AUTH The entry point for sign-in with passwords, one-time passwords, biometric devices, and security keys. 
USER_SRP_AUTH Username-password authentication with the Secure Remote Password (SRP) protocol. For more information, see Use SRP password verification in custom authentication flow. REFRESH_TOKEN_AUTH and REFRESH_TOKEN Provide a valid refresh token and receive new ID and access tokens. For more information, see Using the refresh token. CUSTOM_AUTH Custom authentication with Lambda triggers. For more information, see Custom authentication challenge Lambda triggers. ADMIN_USER_PASSWORD_AUTH Username-password authentication with the password sent directly in the request. For more information, see Admin authentication flow. USER_PASSWORD_AUTH is a flow type of InitiateAuth and isn't valid for AdminInitiateAuth. + /// - analyticsMetadata: The analytics metadata for collecting Amazon Pinpoint metrics. + /// - authFlow: The authentication flow that you want to initiate. Each AuthFlow has linked AuthParameters that you must submit. The following are some example flows and their parameters. USER_AUTH: Request a preferred authentication type or review available authentication types. From the offered authentication types, select one in a challenge response and then authenticate with that method in an additional challenge response. REFRESH_TOKEN_AUTH: Receive new ID and access tokens when you pass a REFRESH_TOKEN parameter with a valid refresh token as the value. USER_SRP_AUTH: Receive secure remote password (SRP) variables for the next challenge, PASSWORD_VERIFIER, when you pass USERNAME and SRP_A parameters.. ADMIN_USER_PASSWORD_AUTH: Receive new tokens or the next challenge, for example SOFTWARE_TOKEN_MFA, when you pass USERNAME and PASSWORD parameters. All flows USER_AUTH The entry point for sign-in with passwords, one-time passwords, and WebAuthN authenticators. USER_SRP_AUTH Username-password authentication with the Secure Remote Password (SRP) protocol. For more information, see Use SRP password verification in custom authentication flow. 
REFRESH_TOKEN_AUTH and REFRESH_TOKEN Provide a valid refresh token and receive new ID and access tokens. For more information, see Using the refresh token. CUSTOM_AUTH Custom authentication with Lambda triggers. For more information, see Custom authentication challenge Lambda triggers. ADMIN_USER_PASSWORD_AUTH Username-password authentication with the password sent directly in the request. For more information, see Admin authentication flow. USER_PASSWORD_AUTH is a flow type of InitiateAuth and isn't valid for AdminInitiateAuth. /// - authParameters: The authentication parameters. These are inputs corresponding to the AuthFlow that you're invoking. The required values depend on the value of AuthFlow: For USER_AUTH: USERNAME (required), PREFERRED_CHALLENGE. If you don't provide a value for PREFERRED_CHALLENGE, Amazon Cognito responds with the AvailableChallenges parameter that specifies the available sign-in methods. For USER_SRP_AUTH: USERNAME (required), SRP_A (required), SECRET_HASH (required if the app client is configured with a client secret), DEVICE_KEY. For ADMIN_USER_PASSWORD_AUTH: USERNAME (required), PASSWORD (required), SECRET_HASH (required if the app client is configured with a client secret), DEVICE_KEY. For REFRESH_TOKEN_AUTH/REFRESH_TOKEN: REFRESH_TOKEN (required), SECRET_HASH (required if the app client is configured with a client secret), DEVICE_KEY. For CUSTOM_AUTH: USERNAME (required), SECRET_HASH (if app client is configured with client secret), DEVICE_KEY. To start the authentication flow with password verification, include ChallengeName: SRP_A and SRP_A: (The SRP_A Value). For more information about SECRET_HASH, see Computing secret hash values. For information about DEVICE_KEY, see Working with user devices in your user pool. - /// - clientId: The app client ID. + /// - clientId: The ID of the app client where the user wants to sign in. 
/// - clientMetadata: A map of custom key-value pairs that you can provide as input for certain custom workflows that this action triggers. You create custom workflows by assigning Lambda functions to user pool triggers. When you use the AdminInitiateAuth API action, Amazon Cognito invokes the Lambda functions that are specified for various triggers. The ClientMetadata value is passed as input to the functions for only the following triggers: Pre signup Pre authentication User migration When Amazon Cognito invokes the functions for these triggers, it passes a JSON payload, which the function receives as input. This payload contains a validationData attribute, which provides the data that you assigned to the ClientMetadata parameter in your AdminInitiateAuth request. In your function code in Lambda, you can process the validationData value to enhance your workflow for your specific needs. When you use the AdminInitiateAuth API action, Amazon Cognito also invokes the functions for the following triggers, but it doesn't provide the ClientMetadata value as input: Post authentication Custom message Pre token generation Create auth challenge Define auth challenge Custom email sender Custom SMS sender For more information, see /// - contextData: Contextual data about your user session, such as the device fingerprint, IP address, or location. Amazon Cognito advanced - /// - session: The optional session ID from a ConfirmSignUp API request. You can sign in a user directly from the sign-up process with the USER_AUTH authentication flow. - /// - userPoolId: The ID of the Amazon Cognito user pool. + /// - session: The optional session ID from a ConfirmSignUp API request. You can sign in a user directly from the sign-up process with an AuthFlow of USER_AUTH and AuthParameters of EMAIL_OTP or SMS_OTP, depending on how your user pool sent the confirmation-code message. + /// - userPoolId: The ID of the user pool where the user wants to sign in. 
/// - logger: Logger use during operation @inlinable public func adminInitiateAuth( @@ -580,7 +580,7 @@ public struct CognitoIdentityProvider: AWSService { /// Parameters: /// - destinationUser: The existing user in the user pool that you want to assign to the external IdP user account. This user can be a local (Username + Password) Amazon Cognito user pools user or a federated user (for example, a SAML or Facebook user). If the user doesn't exist, Amazon Cognito generates an exception. Amazon Cognito returns this user when the new user (with the linked IdP attribute) signs in. For a native username + password user, the ProviderAttributeValue for the DestinationUser should be the username in the user pool. For a federated user, it should be the provider-specific user_id. The ProviderAttributeName of the DestinationUser is ignored. The ProviderName should be set to Cognito for users in Cognito user pools. All attributes in the DestinationUser profile must be mutable. If you have assigned the user any immutable custom attributes, the operation won't succeed. /// - sourceUser: An external IdP account for a user who doesn't exist yet in the user pool. This user must be a federated user (for example, a SAML or Facebook user), not another native user. If the SourceUser is using a federated social IdP, such as Facebook, Google, or Login with Amazon, you must set the ProviderAttributeName to Cognito_Subject. For social IdPs, the ProviderName will be Facebook, Google, or LoginWithAmazon, and Amazon Cognito will automatically parse the Facebook, Google, and Login with Amazon tokens for id, sub, and user_id, respectively. The ProviderAttributeValue for the user must be the same value as the id, sub, or user_id value found in the social IdP token. For OIDC, the ProviderAttributeName can be any mapped value from a claim in the ID token, or that your app retrieves from the userInfo endpoint. 
For SAML, the ProviderAttributeName can be any mapped value from a claim in the SAML assertion. The following additional considerations apply to SourceUser for OIDC and SAML providers. You must map the claim to a user pool attribute in your IdP configuration, and set the user pool attribute name as the value of ProviderAttributeName in your AdminLinkProviderForUser request. For example, email. When you set ProviderAttributeName to Cognito_Subject, Amazon Cognito will automatically parse the default unique identifier found in the subject from the IdP token. - /// - userPoolId: The user pool ID for the user pool. + /// - userPoolId: The ID of the user pool where you want to link a federated identity. /// - logger: Logger use during operation @inlinable public func adminLinkProviderForUser( @@ -597,7 +597,7 @@ public struct CognitoIdentityProvider: AWSService { return try await self.adminLinkProviderForUser(input, logger: logger) } - /// Lists a user's registered devices. Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy. Learn more Signing Amazon Web Services API Requests Using the Amazon Cognito user pools API and user pool endpoints + /// Lists a user's registered devices. Remembered devices are used in authentication services where you offer a "Remember me" option for users who you want to permit to sign in without MFA from a trusted device. Users can bypass MFA while your application performs device SRP authentication on the back end. For more information, see Working with devices. Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy. 
Learn more Signing Amazon Web Services API Requests Using the Amazon Cognito user pools API and user pool endpoints @Sendable @inlinable public func adminListDevices(_ input: AdminListDevicesRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> AdminListDevicesResponse { @@ -610,13 +610,13 @@ public struct CognitoIdentityProvider: AWSService { logger: logger ) } - /// Lists a user's registered devices. Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy. Learn more Signing Amazon Web Services API Requests Using the Amazon Cognito user pools API and user pool endpoints + /// Lists a user's registered devices. Remembered devices are used in authentication services where you offer a "Remember me" option for users who you want to permit to sign in without MFA from a trusted device. Users can bypass MFA while your application performs device SRP authentication on the back end. For more information, see Working with devices. Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy. Learn more Signing Amazon Web Services API Requests Using the Amazon Cognito user pools API and user pool endpoints /// /// Parameters: - /// - limit: The limit of the devices request. + /// - limit: The maximum number of devices that you want Amazon Cognito to return in the response. /// - paginationToken: This API operation returns a limited number of results. The pagination token is /// - username: The username of the user that you want to query or modify. The value of this parameter is typically your user's username, but it can be any of their alias attributes. 
If username isn't an alias attribute in your user pool, this value must be the sub of a local user or the username of a user from a third-party IdP. - /// - userPoolId: The user pool ID. + /// - userPoolId: The ID of the user pool where the device owner is a user. /// - logger: Logger use during operation @inlinable public func adminListDevices( @@ -635,7 +635,7 @@ public struct CognitoIdentityProvider: AWSService { return try await self.adminListDevices(input, logger: logger) } - /// Lists the groups that a user belongs to. Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy. Learn more Signing Amazon Web Services API Requests Using the Amazon Cognito user pools API and user pool endpoints + /// Lists the groups that a user belongs to. User pool groups are identifiers that you can reference from the contents of ID and access tokens, and set preferred IAM roles for identity-pool authentication. For more information, see Adding groups to a user pool. Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy. Learn more Signing Amazon Web Services API Requests Using the Amazon Cognito user pools API and user pool endpoints @Sendable @inlinable public func adminListGroupsForUser(_ input: AdminListGroupsForUserRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> AdminListGroupsForUserResponse { @@ -648,13 +648,13 @@ public struct CognitoIdentityProvider: AWSService { logger: logger ) } - /// Lists the groups that a user belongs to. Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. 
For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy. Learn more Signing Amazon Web Services API Requests Using the Amazon Cognito user pools API and user pool endpoints + /// Lists the groups that a user belongs to. User pool groups are identifiers that you can reference from the contents of ID and access tokens, and set preferred IAM roles for identity-pool authentication. For more information, see Adding groups to a user pool. Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy. Learn more Signing Amazon Web Services API Requests Using the Amazon Cognito user pools API and user pool endpoints /// /// Parameters: - /// - limit: The limit of the request to list groups. - /// - nextToken: An identifier that was returned from the previous call to this operation, which can be used to return the next set of items in the list. + /// - limit: The maximum number of groups that you want Amazon Cognito to return in the response. + /// - nextToken: This API operation returns a limited number of results. The pagination token is /// - username: The username of the user that you want to query or modify. The value of this parameter is typically your user's username, but it can be any of their alias attributes. If username isn't an alias attribute in your user pool, this value must be the sub of a local user or the username of a user from a third-party IdP. - /// - userPoolId: The user pool ID for the user pool. + /// - userPoolId: The ID of the user pool where you want to view a user's groups. 
/// - logger: Logger use during operation @inlinable public func adminListGroupsForUser( @@ -673,7 +673,7 @@ public struct CognitoIdentityProvider: AWSService { return try await self.adminListGroupsForUser(input, logger: logger) } - /// A history of user activity and any risks detected as part of Amazon Cognito advanced security. Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy. Learn more Signing Amazon Web Services API Requests Using the Amazon Cognito user pools API and user pool endpoints + /// Requests a history of user activity and any risks detected as part of Amazon Cognito threat protection. For more information, see Viewing user event history. Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy. Learn more Signing Amazon Web Services API Requests Using the Amazon Cognito user pools API and user pool endpoints @Sendable @inlinable public func adminListUserAuthEvents(_ input: AdminListUserAuthEventsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> AdminListUserAuthEventsResponse { @@ -686,13 +686,13 @@ public struct CognitoIdentityProvider: AWSService { logger: logger ) } - /// A history of user activity and any risks detected as part of Amazon Cognito advanced security. Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy. 
Learn more Signing Amazon Web Services API Requests Using the Amazon Cognito user pools API and user pool endpoints + /// Requests a history of user activity and any risks detected as part of Amazon Cognito threat protection. For more information, see Viewing user event history. Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy. Learn more Signing Amazon Web Services API Requests Using the Amazon Cognito user pools API and user pool endpoints /// /// Parameters: /// - maxResults: The maximum number of authentication events to return. Returns 60 events if you set MaxResults to 0, or if you don't include a MaxResults parameter. - /// - nextToken: A pagination token. + /// - nextToken: This API operation returns a limited number of results. The pagination token is /// - username: The username of the user that you want to query or modify. The value of this parameter is typically your user's username, but it can be any of their alias attributes. If username isn't an alias attribute in your user pool, this value must be the sub of a local user or the username of a user from a third-party IdP. - /// - userPoolId: The user pool ID. + /// - userPoolId: The ID of the user pool that contains the user profile with the logged events. /// - logger: Logger use during operation @inlinable public func adminListUserAuthEvents( @@ -711,7 +711,7 @@ public struct CognitoIdentityProvider: AWSService { return try await self.adminListUserAuthEvents(input, logger: logger) } - /// Removes the specified user from the specified group. Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy.
Learn more Signing Amazon Web Services API Requests Using the Amazon Cognito user pools API and user pool endpoints + /// Given a username and a group name, removes them from the group. User pool groups are identifiers that you can reference from the contents of ID and access tokens, and set preferred IAM roles for identity-pool authentication. For more information, see Adding groups to a user pool. Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy. Learn more Signing Amazon Web Services API Requests Using the Amazon Cognito user pools API and user pool endpoints @Sendable @inlinable public func adminRemoveUserFromGroup(_ input: AdminRemoveUserFromGroupRequest, logger: Logger = AWSClient.loggingDisabled) async throws { @@ -724,12 +724,12 @@ public struct CognitoIdentityProvider: AWSService { logger: logger ) } - /// Removes the specified user from the specified group. Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy. Learn more Signing Amazon Web Services API Requests Using the Amazon Cognito user pools API and user pool endpoints + /// Given a username and a group name, removes them from the group. User pool groups are identifiers that you can reference from the contents of ID and access tokens, and set preferred IAM roles for identity-pool authentication. For more information, see Adding groups to a user pool. Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy.
Learn more Signing Amazon Web Services API Requests Using the Amazon Cognito user pools API and user pool endpoints /// /// Parameters: - /// - groupName: The group name. + /// - groupName: The name of the group that you want to remove the user from, for example MyTestGroup. /// - username: The username of the user that you want to query or modify. The value of this parameter is typically your user's username, but it can be any of their alias attributes. If username isn't an alias attribute in your user pool, this value must be the sub of a local user or the username of a user from a third-party IdP. - /// - userPoolId: The user pool ID for the user pool. + /// - userPoolId: The ID of the user pool that contains the group and the user that you want to remove. /// - logger: Logger use during operation @inlinable public func adminRemoveUserFromGroup( @@ -746,7 +746,7 @@ public struct CognitoIdentityProvider: AWSService { return try await self.adminRemoveUserFromGroup(input, logger: logger) } - /// Resets the specified user's password in a user pool as an administrator. Works on any user. To use this API operation, your user pool must have self-service account recovery configured. Use AdminSetUserPassword if you manage passwords as an administrator. This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in. If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Services service, Amazon Simple Notification Service might place your account in the SMS sandbox. 
In sandbox mode , you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide. Deactivates a user's password, requiring them to change it. If a user tries to sign in after the API is called, Amazon Cognito responds with a PasswordResetRequiredException error. Your app must then perform the actions that reset your user's password: the forgot-password flow. In addition, if the user pool has phone verification selected and a verified phone number exists for the user, or if email verification is selected and a verified email exists for the user, calling this API will also result in sending a message to the end user with the code to change their password. Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy. Learn more Signing Amazon Web Services API Requests Using the Amazon Cognito user pools API and user pool endpoints + /// Resets the specified user's password in a user pool. This operation doesn't change the user's password, but sends a password-reset code. This operation is the administrative authentication API equivalent to ForgotPassword. This operation deactivates a user's password, requiring them to change it. If a user tries to sign in after the API request, Amazon Cognito responds with a PasswordResetRequiredException error. Your app must then complete the forgot-password flow by prompting the user for their code and a new password, then submitting those values in a ConfirmForgotPassword request. 
In addition, if the user pool has phone verification selected and a verified phone number exists for the user, or if email verification is selected and a verified email exists for the user, calling this API will also result in sending a message to the end user with the code to change their password. To use this API operation, your user pool must have self-service account recovery configured. Use AdminSetUserPassword if you manage passwords as an administrator. This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in. If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Services service, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode, you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide. Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy.
Learn more Signing Amazon Web Services API Requests Using the Amazon Cognito user pools API and user pool endpoints @Sendable @inlinable public func adminResetUserPassword(_ input: AdminResetUserPasswordRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> AdminResetUserPasswordResponse { @@ -759,12 +759,12 @@ public struct CognitoIdentityProvider: AWSService { logger: logger ) } - /// Resets the specified user's password in a user pool as an administrator. Works on any user. To use this API operation, your user pool must have self-service account recovery configured. Use AdminSetUserPassword if you manage passwords as an administrator. This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in. If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Services service, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode , you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide. Deactivates a user's password, requiring them to change it. If a user tries to sign in after the API is called, Amazon Cognito responds with a PasswordResetRequiredException error. Your app must then perform the actions that reset your user's password: the forgot-password flow. 
In addition, if the user pool has phone verification selected and a verified phone number exists for the user, or if email verification is selected and a verified email exists for the user, calling this API will also result in sending a message to the end user with the code to change their password. Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy. Learn more Signing Amazon Web Services API Requests Using the Amazon Cognito user pools API and user pool endpoints + /// Resets the specified user's password in a user pool. This operation doesn't change the user's password, but sends a password-reset code. This operation is the administrative authentication API equivalent to ForgotPassword. This operation deactivates a user's password, requiring them to change it. If a user tries to sign in after the API request, Amazon Cognito responds with a PasswordResetRequiredException error. Your app must then complete the forgot-password flow by prompting the user for their code and a new password, then submitting those values in a ConfirmForgotPassword request. In addition, if the user pool has phone verification selected and a verified phone number exists for the user, or if email verification is selected and a verified email exists for the user, calling this API will also result in sending a message to the end user with the code to change their password. To use this API operation, your user pool must have self-service account recovery configured. Use AdminSetUserPassword if you manage passwords as an administrator. This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. 
If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in. If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Services service, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode, you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide. Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy. Learn more Signing Amazon Web Services API Requests Using the Amazon Cognito user pools API and user pool endpoints /// /// Parameters: - /// - clientMetadata: A map of custom key-value pairs that you can provide as input for any custom workflows that this action triggers. You create custom workflows by assigning Lambda functions to user pool triggers. When you use the AdminResetUserPassword API action, Amazon Cognito invokes the function that is assigned to the custom message trigger. When Amazon Cognito invokes this function, it passes a JSON payload, which the function receives as input. This payload contains a clientMetadata attribute, which provides the data that you assigned to the ClientMetadata parameter in your AdminResetUserPassword request. In your function code in Lambda, you can process the clientMetadata value to enhance your workflow for your specific needs.
For more information, see + /// - clientMetadata: A map of custom key-value pairs that you can provide as input for any custom workflows that this action triggers. You create custom workflows by assigning Lambda functions to user pool triggers. The AdminResetUserPassword API operation invokes the function that is assigned to the custom message trigger. When Amazon Cognito invokes this function, it passes a JSON payload, which the function receives as input. This payload contains a clientMetadata attribute, which provides the data that you assigned to the ClientMetadata parameter in your AdminResetUserPassword request. In your function code in Lambda, you can process the clientMetadata value to enhance your workflow for your specific needs. For more information, see /// - username: The username of the user that you want to query or modify. The value of this parameter is typically your user's username, but it can be any of their alias attributes. If username isn't an alias attribute in your user pool, this value must be the sub of a local user or the username of a user from a third-party IdP. - /// - userPoolId: The user pool ID for the user pool where you want to reset the user's password. + /// - userPoolId: The ID of the user pool where you want to reset the user's password. /// - logger: Logger use during operation @inlinable public func adminResetUserPassword( @@ -798,13 +798,13 @@ public struct CognitoIdentityProvider: AWSService { /// /// Parameters: /// - analyticsMetadata: The analytics metadata for collecting Amazon Pinpoint metrics for AdminRespondToAuthChallenge calls. - /// - challengeName: The challenge name. For more information, see AdminInitiateAuth. + /// - challengeName: The name of the challenge that you are responding to. You can find more information about values for ChallengeName in the response parameters of AdminInitiateAuth. /// - challengeResponses: The responses to the challenge that you received in the previous request. 
Each challenge has its own required response parameters. The following examples are partial JSON request bodies that highlight challenge-response parameters. You must provide a SECRET_HASH parameter in all challenge responses to an app client that has a client secret. Include a DEVICE_KEY for device authentication. SELECT_CHALLENGE "ChallengeName": "SELECT_CHALLENGE", "ChallengeResponses": { "USERNAME": "[username]", "ANSWER": "[Challenge name]"} Available challenges are PASSWORD, PASSWORD_SRP, EMAIL_OTP, SMS_OTP, and WEB_AUTHN. Complete authentication in the SELECT_CHALLENGE response for PASSWORD, PASSWORD_SRP, and WEB_AUTHN: "ChallengeName": "SELECT_CHALLENGE", "ChallengeResponses": { "ANSWER": "WEB_AUTHN", "USERNAME": "[username]", "CREDENTIAL": "[AuthenticationResponseJSON]"} See AuthenticationResponseJSON. "ChallengeName": "SELECT_CHALLENGE", "ChallengeResponses": { "ANSWER": "PASSWORD", "USERNAME": "[username]", "PASSWORD": "[password]"} "ChallengeName": "SELECT_CHALLENGE", "ChallengeResponses": { "ANSWER": "PASSWORD_SRP", "USERNAME": "[username]", "SRP_A": "[SRP_A]"} For SMS_OTP and EMAIL_OTP, respond with the username and answer. Your user pool will send a code for the user to submit in the next challenge response. "ChallengeName": "SELECT_CHALLENGE", "ChallengeResponses": { "ANSWER": "SMS_OTP", "USERNAME": "[username]"} "ChallengeName": "SELECT_CHALLENGE", "ChallengeResponses": { "ANSWER": "EMAIL_OTP", "USERNAME": "[username]"} SMS_OTP "ChallengeName": "SMS_OTP", "ChallengeResponses": {"SMS_OTP_CODE": "[code]", "USERNAME": "[username]"} EMAIL_OTP "ChallengeName": "EMAIL_OTP", "ChallengeResponses": {"EMAIL_OTP_CODE": "[code]", "USERNAME": "[username]"} SMS_MFA "ChallengeName": "SMS_MFA", "ChallengeResponses": {"SMS_MFA_CODE": "[code]", "USERNAME": "[username]"} PASSWORD_VERIFIER This challenge response is part of the SRP flow. Amazon Cognito requires that your application respond to this challenge within a few seconds. 
When the response time exceeds this period, your user pool returns a NotAuthorizedException error. "ChallengeName": "PASSWORD_VERIFIER", "ChallengeResponses": {"PASSWORD_CLAIM_SIGNATURE": "[claim_signature]", "PASSWORD_CLAIM_SECRET_BLOCK": "[secret_block]", "TIMESTAMP": [timestamp], "USERNAME": "[username]"} Add "DEVICE_KEY" when you sign in with a remembered device. CUSTOM_CHALLENGE "ChallengeName": "CUSTOM_CHALLENGE", "ChallengeResponses": {"USERNAME": "[username]", "ANSWER": "[challenge_answer]"} Add "DEVICE_KEY" when you sign in with a remembered device. NEW_PASSWORD_REQUIRED "ChallengeName": "NEW_PASSWORD_REQUIRED", "ChallengeResponses": {"NEW_PASSWORD": "[new_password]", "USERNAME": "[username]"} To set any required attributes that InitiateAuth returned in an requiredAttributes parameter, add "userAttributes.[attribute_name]": "[attribute_value]". This parameter can also set values for writable attributes that aren't required by your user pool. In a NEW_PASSWORD_REQUIRED challenge response, you can't modify a required attribute that already has a value. - /// - clientId: The app client ID. - /// - clientMetadata: A map of custom key-value pairs that you can provide as input for any custom workflows that this action triggers. You create custom workflows by assigning Lambda functions to user pool triggers. When you use the AdminRespondToAuthChallenge API action, Amazon Cognito invokes any functions that you have assigned to the following triggers: pre sign-up custom message post authentication user migration pre token generation define auth challenge create auth challenge verify auth challenge response When Amazon Cognito invokes any of these functions, it passes a JSON payload, which the function receives as input. This payload contains a clientMetadata attribute that provides the data that you assigned to the ClientMetadata parameter in your AdminRespondToAuthChallenge request. 
In your function code in Lambda, you can process the clientMetadata value to enhance your workflow for your specific needs. For more information, see + /// - clientId: The ID of the app client where you initiated sign-in. + /// - clientMetadata: A map of custom key-value pairs that you can provide as input for any custom workflows that this action triggers. You create custom workflows by assigning Lambda functions to user pool triggers. When you use the AdminRespondToAuthChallenge API action, Amazon Cognito invokes any functions that you have assigned to the following triggers: Pre sign-up custom message Post authentication User migration Pre token generation Define auth challenge Create auth challenge Verify auth challenge response When Amazon Cognito invokes any of these functions, it passes a JSON payload, which the function receives as input. This payload contains a clientMetadata attribute that provides the data that you assigned to the ClientMetadata parameter in your AdminRespondToAuthChallenge request. In your function code in Lambda, you can process the clientMetadata value to enhance your workflow for your specific needs. For more information, see /// - contextData: Contextual data about your user session, such as the device fingerprint, IP address, or location. Amazon Cognito advanced - /// - session: The session that should be passed both ways in challenge-response calls to the service. If an InitiateAuth or RespondToAuthChallenge API call determines that the caller must pass another challenge, it returns a session with other challenge parameters. This session should be passed as it is to the next RespondToAuthChallenge API call. - /// - userPoolId: The ID of the Amazon Cognito user pool. + /// - session: The session identifier that maintains the state of authentication requests and challenge responses. 
If an AdminInitiateAuth or AdminRespondToAuthChallenge API request results in a determination that your application must pass another challenge, Amazon Cognito returns a session with other challenge parameters. Send this session identifier, unmodified, to the next AdminRespondToAuthChallenge request. + /// - userPoolId: The ID of the user pool where you want to respond to an authentication challenge. /// - logger: Logger use during operation @inlinable public func adminRespondToAuthChallenge( @@ -831,7 +831,7 @@ public struct CognitoIdentityProvider: AWSService { return try await self.adminRespondToAuthChallenge(input, logger: logger) } - /// Sets the user's multi-factor authentication (MFA) preference, including which MFA options are activated, and if any are preferred. Only one factor can be set as preferred. The preferred MFA factor will be used to authenticate a user if multiple factors are activated. If multiple options are activated and no preference is set, a challenge to choose an MFA option will be returned during sign-in. Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy. Learn more Signing Amazon Web Services API Requests Using the Amazon Cognito user pools API and user pool endpoints + /// Sets the user's multi-factor authentication (MFA) preference, including which MFA options are activated, and if any are preferred. Only one factor can be set as preferred. The preferred MFA factor will be used to authenticate a user if multiple factors are activated. If multiple options are activated and no preference is set, a challenge to choose an MFA option will be returned during sign-in. This operation doesn't reset an existing TOTP MFA for a user. To register a new TOTP factor for a user, make an AssociateSoftwareToken request. 
For more information, see TOTP software token MFA. Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy. Learn more Signing Amazon Web Services API Requests Using the Amazon Cognito user pools API and user pool endpoints @Sendable @inlinable public func adminSetUserMFAPreference(_ input: AdminSetUserMFAPreferenceRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> AdminSetUserMFAPreferenceResponse { @@ -844,7 +844,7 @@ public struct CognitoIdentityProvider: AWSService { logger: logger ) } - /// Sets the user's multi-factor authentication (MFA) preference, including which MFA options are activated, and if any are preferred. Only one factor can be set as preferred. The preferred MFA factor will be used to authenticate a user if multiple factors are activated. If multiple options are activated and no preference is set, a challenge to choose an MFA option will be returned during sign-in. Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy. Learn more Signing Amazon Web Services API Requests Using the Amazon Cognito user pools API and user pool endpoints + /// Sets the user's multi-factor authentication (MFA) preference, including which MFA options are activated, and if any are preferred. Only one factor can be set as preferred. The preferred MFA factor will be used to authenticate a user if multiple factors are activated. If multiple options are activated and no preference is set, a challenge to choose an MFA option will be returned during sign-in. This operation doesn't reset an existing TOTP MFA for a user. 
To register a new TOTP factor for a user, make an AssociateSoftwareToken request. For more information, see TOTP software token MFA. Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy. Learn more Signing Amazon Web Services API Requests Using the Amazon Cognito user pools API and user pool endpoints /// /// Parameters: /// - emailMfaSettings: User preferences for email message MFA. Activates or deactivates email MFA and sets it as the preferred MFA method when multiple methods are available. To activate this setting, advanced security features must be active in your user pool. @@ -872,7 +872,7 @@ public struct CognitoIdentityProvider: AWSService { return try await self.adminSetUserMFAPreference(input, logger: logger) } - /// Sets the specified user's password in a user pool as an administrator. Works on any user. The password can be temporary or permanent. If it is temporary, the user status enters the FORCE_CHANGE_PASSWORD state. When the user next tries to sign in, the InitiateAuth/AdminInitiateAuth response will contain the NEW_PASSWORD_REQUIRED challenge. If the user doesn't sign in before it expires, the user won't be able to sign in, and an administrator must reset their password. Once the user has set a new password, or the password is permanent, the user status is set to Confirmed. AdminSetUserPassword can set a password for the user profile that Amazon Cognito creates for third-party federated users. When you set a password, the federated user's status changes from EXTERNAL_PROVIDER to CONFIRMED. A user in this state can sign in as a federated user, and initiate authentication flows in the API like a linked native user. They can also modify their password and attributes in token-authenticated API requests like ChangePassword and UpdateUserAttributes. 
As a best security practice and to keep users in sync with your external IdP, don't set passwords on federated user profiles. To set up a federated user for native sign-in with a linked native user, refer to Linking federated users to an existing user profile. Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy. Learn more Signing Amazon Web Services API Requests Using the Amazon Cognito user pools API and user pool endpoints + /// Sets the specified user's password in a user pool. This operation administratively sets a temporary or permanent password for a user. With this operation, you can bypass self-service password changes and permit immediate sign-in with the password that you set. To do this, set Permanent to true. You can also set a new temporary password in this request, send it to a user, and require them to choose a new password on their next sign-in. To do this, set Permanent to false. If the password is temporary, the user's Status becomes FORCE_CHANGE_PASSWORD. When the user next tries to sign in, the InitiateAuth or AdminInitiateAuth response includes the NEW_PASSWORD_REQUIRED challenge. If the user doesn't sign in before the temporary password expires, they can no longer sign in and you must repeat this operation to set a temporary or permanent password for them. After the user sets a new password, or if you set a permanent password, their status becomes Confirmed. AdminSetUserPassword can set a password for the user profile that Amazon Cognito creates for third-party federated users. When you set a password, the federated user's status changes from EXTERNAL_PROVIDER to CONFIRMED. A user in this state can sign in as a federated user, and initiate authentication flows in the API like a linked native user. 
They can also modify their password and attributes in token-authenticated API requests like ChangePassword and UpdateUserAttributes. As a best security practice and to keep users in sync with your external IdP, don't set passwords on federated user profiles. To set up a federated user for native sign-in with a linked native user, refer to Linking federated users to an existing user profile. Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy. Learn more Signing Amazon Web Services API Requests Using the Amazon Cognito user pools API and user pool endpoints @Sendable @inlinable public func adminSetUserPassword(_ input: AdminSetUserPasswordRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> AdminSetUserPasswordResponse { @@ -885,13 +885,13 @@ public struct CognitoIdentityProvider: AWSService { logger: logger ) } - /// Sets the specified user's password in a user pool as an administrator. Works on any user. The password can be temporary or permanent. If it is temporary, the user status enters the FORCE_CHANGE_PASSWORD state. When the user next tries to sign in, the InitiateAuth/AdminInitiateAuth response will contain the NEW_PASSWORD_REQUIRED challenge. If the user doesn't sign in before it expires, the user won't be able to sign in, and an administrator must reset their password. Once the user has set a new password, or the password is permanent, the user status is set to Confirmed. AdminSetUserPassword can set a password for the user profile that Amazon Cognito creates for third-party federated users. When you set a password, the federated user's status changes from EXTERNAL_PROVIDER to CONFIRMED. A user in this state can sign in as a federated user, and initiate authentication flows in the API like a linked native user. 
They can also modify their password and attributes in token-authenticated API requests like ChangePassword and UpdateUserAttributes. As a best security practice and to keep users in sync with your external IdP, don't set passwords on federated user profiles. To set up a federated user for native sign-in with a linked native user, refer to Linking federated users to an existing user profile. Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy. Learn more Signing Amazon Web Services API Requests Using the Amazon Cognito user pools API and user pool endpoints + /// Sets the specified user's password in a user pool. This operation administratively sets a temporary or permanent password for a user. With this operation, you can bypass self-service password changes and permit immediate sign-in with the password that you set. To do this, set Permanent to true. You can also set a new temporary password in this request, send it to a user, and require them to choose a new password on their next sign-in. To do this, set Permanent to false. If the password is temporary, the user's Status becomes FORCE_CHANGE_PASSWORD. When the user next tries to sign in, the InitiateAuth or AdminInitiateAuth response includes the NEW_PASSWORD_REQUIRED challenge. If the user doesn't sign in before the temporary password expires, they can no longer sign in and you must repeat this operation to set a temporary or permanent password for them. After the user sets a new password, or if you set a permanent password, their status becomes Confirmed. AdminSetUserPassword can set a password for the user profile that Amazon Cognito creates for third-party federated users. When you set a password, the federated user's status changes from EXTERNAL_PROVIDER to CONFIRMED. 
A user in this state can sign in as a federated user, and initiate authentication flows in the API like a linked native user. They can also modify their password and attributes in token-authenticated API requests like ChangePassword and UpdateUserAttributes. As a best security practice and to keep users in sync with your external IdP, don't set passwords on federated user profiles. To set up a federated user for native sign-in with a linked native user, refer to Linking federated users to an existing user profile. Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy. Learn more Signing Amazon Web Services API Requests Using the Amazon Cognito user pools API and user pool endpoints /// /// Parameters: - /// - password: The password for the user. - /// - permanent: True if the password is permanent, False if it is temporary. + /// - password: The new temporary or permanent password that you want to set for the user. You can't remove the password for a user who already has a password so that they can only sign in with passwordless methods. In this scenario, you must create a new user without a password. + /// - permanent: Set to true to set a password that the user can immediately sign in with. Set to false to set a temporary password that the user must change on their next sign-in. /// - username: The username of the user that you want to query or modify. The value of this parameter is typically your user's username, but it can be any of their alias attributes. If username isn't an alias attribute in your user pool, this value must be the sub of a local user or the username of a user from a third-party IdP. - /// - userPoolId: The user pool ID for the user pool where you want to set the user's password. 
+ /// - userPoolId: The ID of the user pool where you want to set the user's password. /// - logger: Logger use during operation @inlinable public func adminSetUserPassword( @@ -945,7 +945,7 @@ public struct CognitoIdentityProvider: AWSService { return try await self.adminSetUserSettings(input, logger: logger) } - /// Provides feedback for an authentication event indicating if it was from a valid user. This feedback is used for improving the risk evaluation decision for the user pool as part of Amazon Cognito advanced security. Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy. Learn more Signing Amazon Web Services API Requests Using the Amazon Cognito user pools API and user pool endpoints + /// Provides feedback for an authentication event indicating if it was from a valid user. This feedback is used for improving the risk evaluation decision for the user pool as part of Amazon Cognito threat protection. To train the threat-protection model to recognize trusted and untrusted sign-in characteristics, configure threat protection in audit-only mode and provide a mechanism for users or administrators to submit feedback. Your feedback can tell Amazon Cognito that a risk rating was assigned at a level you don't agree with. Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy. 
Learn more Signing Amazon Web Services API Requests Using the Amazon Cognito user pools API and user pool endpoints @Sendable @inlinable public func adminUpdateAuthEventFeedback(_ input: AdminUpdateAuthEventFeedbackRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> AdminUpdateAuthEventFeedbackResponse { @@ -958,13 +958,13 @@ public struct CognitoIdentityProvider: AWSService { logger: logger ) } - /// Provides feedback for an authentication event indicating if it was from a valid user. This feedback is used for improving the risk evaluation decision for the user pool as part of Amazon Cognito advanced security. Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy. Learn more Signing Amazon Web Services API Requests Using the Amazon Cognito user pools API and user pool endpoints + /// Provides feedback for an authentication event indicating if it was from a valid user. This feedback is used for improving the risk evaluation decision for the user pool as part of Amazon Cognito threat protection. To train the threat-protection model to recognize trusted and untrusted sign-in characteristics, configure threat protection in audit-only mode and provide a mechanism for users or administrators to submit feedback. Your feedback can tell Amazon Cognito that a risk rating was assigned at a level you don't agree with. Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy. Learn more Signing Amazon Web Services API Requests Using the Amazon Cognito user pools API and user pool endpoints /// /// Parameters: - /// - eventId: The authentication event ID. 
+ /// - eventId: The authentication event ID. To query authentication events for a user, see AdminListUserAuthEvents. /// - feedbackValue: The authentication event feedback value. When you provide a FeedbackValue /// - username: The username of the user that you want to query or modify. The value of this parameter is typically your user's username, but it can be any of their alias attributes. If username isn't an alias attribute in your user pool, this value must be the sub of a local user or the username of a user from a third-party IdP. - /// - userPoolId: The user pool ID. + /// - userPoolId: The ID of the user pool where you want to submit authentication-event feedback. /// - logger: Logger use during operation @inlinable public func adminUpdateAuthEventFeedback( @@ -983,7 +983,7 @@ public struct CognitoIdentityProvider: AWSService { return try await self.adminUpdateAuthEventFeedback(input, logger: logger) } - /// Updates the device status as an administrator. Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy. Learn more Signing Amazon Web Services API Requests Using the Amazon Cognito user pools API and user pool endpoints + /// Updates the status of a user's device so that it is marked as remembered or not remembered for the purpose of device authentication. Device authentication is a "remember me" mechanism that silently completes sign-in from trusted devices with a device key instead of a user-provided MFA code. This operation changes the status of a device without deleting it, so you can enable it again later. For more information about device authentication, see Working with devices. Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. 
For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy. Learn more Signing Amazon Web Services API Requests Using the Amazon Cognito user pools API and user pool endpoints @Sendable @inlinable public func adminUpdateDeviceStatus(_ input: AdminUpdateDeviceStatusRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> AdminUpdateDeviceStatusResponse { @@ -996,13 +996,13 @@ public struct CognitoIdentityProvider: AWSService { logger: logger ) } - /// Updates the device status as an administrator. Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy. Learn more Signing Amazon Web Services API Requests Using the Amazon Cognito user pools API and user pool endpoints + /// Updates the status of a user's device so that it is marked as remembered or not remembered for the purpose of device authentication. Device authentication is a "remember me" mechanism that silently completes sign-in from trusted devices with a device key instead of a user-provided MFA code. This operation changes the status of a device without deleting it, so you can enable it again later. For more information about device authentication, see Working with devices. Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy. Learn more Signing Amazon Web Services API Requests Using the Amazon Cognito user pools API and user pool endpoints /// /// Parameters: - /// - deviceKey: The device key. - /// - deviceRememberedStatus: The status indicating whether a device has been remembered or not. 
+ /// - deviceKey: The unique identifier, or device key, of the device that you want to update the status for. + /// - deviceRememberedStatus: To enable device authentication with the specified device, set to remembered. To disable, set to not_remembered. /// - username: The username of the user that you want to query or modify. The value of this parameter is typically your user's username, but it can be any of their alias attributes. If username isn't an alias attribute in your user pool, this value must be the sub of a local user or the username of a user from a third-party IdP. - /// - userPoolId: The user pool ID. + /// - userPoolId: The ID of the user pool where you want to change a user's device status. /// - logger: Logger use during operation @inlinable public func adminUpdateDeviceStatus( @@ -1021,7 +1021,7 @@ public struct CognitoIdentityProvider: AWSService { return try await self.adminUpdateDeviceStatus(input, logger: logger) } - /// This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in. If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Services service, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode , you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide.
Updates the specified user's attributes, including developer attributes, as an administrator. Works on any user. To delete an attribute from your user, submit the attribute in your API request with a blank value. For custom attributes, you must prepend the custom: prefix to the attribute name. In addition to updating user attributes, this API can also be used to mark phone and email as verified. Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy. Learn more Signing Amazon Web Services API Requests Using the Amazon Cognito user pools API and user pool endpoints + /// This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in. If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Services service, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode, you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide. Updates the specified user's attributes. To delete an attribute from your user, submit the attribute in your API request with a blank value. For custom attributes, you must prepend the custom: prefix to the attribute name.
This operation can set a user's email address or phone number as verified and permit immediate sign-in in user pools that require verification of these attributes. To do this, set the email_verified or phone_number_verified attribute to true. Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy. Learn more Signing Amazon Web Services API Requests Using the Amazon Cognito user pools API and user pool endpoints @Sendable @inlinable public func adminUpdateUserAttributes(_ input: AdminUpdateUserAttributesRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> AdminUpdateUserAttributesResponse { @@ -1034,13 +1034,13 @@ public struct CognitoIdentityProvider: AWSService { logger: logger ) } - /// This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in. If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Services service, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode , you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide. Updates the specified user's attributes, including developer attributes, as an administrator. Works on any user. 
To delete an attribute from your user, submit the attribute in your API request with a blank value. For custom attributes, you must prepend the custom: prefix to the attribute name. In addition to updating user attributes, this API can also be used to mark phone and email as verified. Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy. Learn more Signing Amazon Web Services API Requests Using the Amazon Cognito user pools API and user pool endpoints + /// This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in. If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Services service, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode, you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide. Updates the specified user's attributes. To delete an attribute from your user, submit the attribute in your API request with a blank value. For custom attributes, you must prepend the custom: prefix to the attribute name.
This operation can set a user's email address or phone number as verified and permit immediate sign-in in user pools that require verification of these attributes. To do this, set the email_verified or phone_number_verified attribute to true. Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy. Learn more Signing Amazon Web Services API Requests Using the Amazon Cognito user pools API and user pool endpoints /// /// Parameters: /// - clientMetadata: A map of custom key-value pairs that you can provide as input for any custom workflows that this action triggers. You create custom workflows by assigning Lambda functions to user pool triggers. When you use the AdminUpdateUserAttributes API action, Amazon Cognito invokes the function that is assigned to the custom message trigger. When Amazon Cognito invokes this function, it passes a JSON payload, which the function receives as input. This payload contains a clientMetadata attribute, which provides the data that you assigned to the ClientMetadata parameter in your AdminUpdateUserAttributes request. In your function code in Lambda, you can process the clientMetadata value to enhance your workflow for your specific needs. For more information, see /// - userAttributes: An array of name-value pairs representing user attributes. For custom attributes, you must prepend the custom: prefix to the attribute name. If your user pool requires verification before Amazon Cognito updates an attribute value that you specify in this request, Amazon Cognito doesn’t immediately update the value of that attribute. After your user receives and responds to a verification message to verify the new value, Amazon Cognito updates the attribute value. 
Your user can sign in and receive messages with the original attribute value until they verify the new value. To skip the verification message and update the value of an attribute that requires verification in the same API request, include the email_verified or phone_number_verified attribute, with a value of true. If you set the email_verified or phone_number_verified value for an email or phone_number attribute that requires verification to true, Amazon Cognito doesn’t send a verification message to your user. /// - username: The username of the user that you want to query or modify. The value of this parameter is typically your user's username, but it can be any of their alias attributes. If username isn't an alias attribute in your user pool, this value must be the sub of a local user or the username of a user from a third-party IdP. - /// - userPoolId: The user pool ID for the user pool where you want to update user attributes. + /// - userPoolId: The ID of the user pool where you want to update user attributes. /// - logger: Logger use during operation @inlinable public func adminUpdateUserAttributes( @@ -1059,7 +1059,7 @@ public struct CognitoIdentityProvider: AWSService { return try await self.adminUpdateUserAttributes(input, logger: logger) } - /// Invalidates the identity, access, and refresh tokens that Amazon Cognito issued to a user. Call this operation with your administrative credentials when your user signs out of your app. This results in the following behavior. Amazon Cognito no longer accepts token-authorized user operations that you authorize with a signed-out user's access tokens. For more information, see Using the Amazon Cognito user pools API and user pool endpoints. Amazon Cognito returns an Access Token has been revoked error when your app attempts to authorize a user pools API request with a revoked access token that contains the scope aws.cognito.signin.user.admin. 
Amazon Cognito no longer accepts a signed-out user's ID token in a GetId request to an identity pool with ServerSideTokenCheck enabled for its user pool IdP configuration in CognitoIdentityProvider. Amazon Cognito no longer accepts a signed-out user's refresh tokens in refresh requests. Other requests might be valid until your user's token expires. Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy. Learn more Signing Amazon Web Services API Requests Using the Amazon Cognito user pools API and user pool endpoints + /// Invalidates the identity, access, and refresh tokens that Amazon Cognito issued to a user. Call this operation with your administrative credentials when your user signs out of your app. This results in the following behavior. Amazon Cognito no longer accepts token-authorized user operations that you authorize with a signed-out user's access tokens. For more information, see Using the Amazon Cognito user pools API and user pool endpoints. Amazon Cognito returns an Access Token has been revoked error when your app attempts to authorize a user pools API request with a revoked access token that contains the scope aws.cognito.signin.user.admin. Amazon Cognito no longer accepts a signed-out user's ID token in a GetId request to an identity pool with ServerSideTokenCheck enabled for its user pool IdP configuration in CognitoIdentityProvider. Amazon Cognito no longer accepts a signed-out user's refresh tokens in refresh requests. Other requests might be valid until your user's token expires. This operation doesn't clear the managed login session cookie. To clear the session for a user who signed in with managed login or the classic hosted UI, direct their browser session to the logout endpoint. 
Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy. Learn more Signing Amazon Web Services API Requests Using the Amazon Cognito user pools API and user pool endpoints @Sendable @inlinable public func adminUserGlobalSignOut(_ input: AdminUserGlobalSignOutRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> AdminUserGlobalSignOutResponse { @@ -1072,11 +1072,11 @@ public struct CognitoIdentityProvider: AWSService { logger: logger ) } - /// Invalidates the identity, access, and refresh tokens that Amazon Cognito issued to a user. Call this operation with your administrative credentials when your user signs out of your app. This results in the following behavior. Amazon Cognito no longer accepts token-authorized user operations that you authorize with a signed-out user's access tokens. For more information, see Using the Amazon Cognito user pools API and user pool endpoints. Amazon Cognito returns an Access Token has been revoked error when your app attempts to authorize a user pools API request with a revoked access token that contains the scope aws.cognito.signin.user.admin. Amazon Cognito no longer accepts a signed-out user's ID token in a GetId request to an identity pool with ServerSideTokenCheck enabled for its user pool IdP configuration in CognitoIdentityProvider. Amazon Cognito no longer accepts a signed-out user's refresh tokens in refresh requests. Other requests might be valid until your user's token expires. Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy. 
Learn more Signing Amazon Web Services API Requests Using the Amazon Cognito user pools API and user pool endpoints + /// Invalidates the identity, access, and refresh tokens that Amazon Cognito issued to a user. Call this operation with your administrative credentials when your user signs out of your app. This results in the following behavior. Amazon Cognito no longer accepts token-authorized user operations that you authorize with a signed-out user's access tokens. For more information, see Using the Amazon Cognito user pools API and user pool endpoints. Amazon Cognito returns an Access Token has been revoked error when your app attempts to authorize a user pools API request with a revoked access token that contains the scope aws.cognito.signin.user.admin. Amazon Cognito no longer accepts a signed-out user's ID token in a GetId request to an identity pool with ServerSideTokenCheck enabled for its user pool IdP configuration in CognitoIdentityProvider. Amazon Cognito no longer accepts a signed-out user's refresh tokens in refresh requests. Other requests might be valid until your user's token expires. This operation doesn't clear the managed login session cookie. To clear the session for a user who signed in with managed login or the classic hosted UI, direct their browser session to the logout endpoint. Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy. Learn more Signing Amazon Web Services API Requests Using the Amazon Cognito user pools API and user pool endpoints /// /// Parameters: /// - username: The username of the user that you want to query or modify. The value of this parameter is typically your user's username, but it can be any of their alias attributes. 
If username isn't an alias attribute in your user pool, this value must be the sub of a local user or the username of a user from a third-party IdP. - /// - userPoolId: The user pool ID. + /// - userPoolId: The ID of the user pool where you want to sign out a user. /// - logger: Logger use during operation @inlinable public func adminUserGlobalSignOut( @@ -1091,7 +1091,7 @@ public struct CognitoIdentityProvider: AWSService { return try await self.adminUserGlobalSignOut(input, logger: logger) } - /// Begins setup of time-based one-time password (TOTP) multi-factor authentication (MFA) for a user, with a unique private key that Amazon Cognito generates and returns in the API response. You can authorize an AssociateSoftwareToken request with either the user's access token, or a session string from a challenge response that you received from Amazon Cognito. Amazon Cognito disassociates an existing software token when you verify the new token in a VerifySoftwareToken API request. If you don't verify the software token and your user pool doesn't require MFA, the user can then authenticate with user name and password credentials alone. If your user pool requires TOTP MFA, Amazon Cognito generates an MFA_SETUP or SOFTWARE_TOKEN_SETUP challenge each time your user signs in. Complete setup with AssociateSoftwareToken and VerifySoftwareToken. After you set up software token MFA for your user, Amazon Cognito generates a SOFTWARE_TOKEN_MFA challenge when they authenticate. Respond to this challenge with your user's TOTP. Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints. 
+ /// Begins setup of time-based one-time password (TOTP) multi-factor authentication (MFA) for a user, with a unique private key that Amazon Cognito generates and returns in the API response. You can authorize an AssociateSoftwareToken request with either the user's access token, or a session string from a challenge response that you received from Amazon Cognito. Amazon Cognito disassociates an existing software token when you verify the new token in a VerifySoftwareToken API request. If you don't verify the software token and your user pool doesn't require MFA, the user can then authenticate with user name and password credentials alone. If your user pool requires TOTP MFA, Amazon Cognito generates an MFA_SETUP or SOFTWARE_TOKEN_SETUP challenge each time your user signs in. Complete setup with AssociateSoftwareToken and VerifySoftwareToken. After you set up software token MFA for your user, Amazon Cognito generates a SOFTWARE_TOKEN_MFA challenge when they authenticate. Respond to this challenge with your user's TOTP. Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints. Authorize this action with a signed-in user's access token. It must include the scope aws.cognito.signin.user.admin. 
@Sendable @inlinable public func associateSoftwareToken(_ input: AssociateSoftwareTokenRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> AssociateSoftwareTokenResponse { @@ -1104,11 +1104,11 @@ public struct CognitoIdentityProvider: AWSService { logger: logger ) } - /// Begins setup of time-based one-time password (TOTP) multi-factor authentication (MFA) for a user, with a unique private key that Amazon Cognito generates and returns in the API response. You can authorize an AssociateSoftwareToken request with either the user's access token, or a session string from a challenge response that you received from Amazon Cognito. Amazon Cognito disassociates an existing software token when you verify the new token in a VerifySoftwareToken API request. If you don't verify the software token and your user pool doesn't require MFA, the user can then authenticate with user name and password credentials alone. If your user pool requires TOTP MFA, Amazon Cognito generates an MFA_SETUP or SOFTWARE_TOKEN_SETUP challenge each time your user signs in. Complete setup with AssociateSoftwareToken and VerifySoftwareToken. After you set up software token MFA for your user, Amazon Cognito generates a SOFTWARE_TOKEN_MFA challenge when they authenticate. Respond to this challenge with your user's TOTP. Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints. + /// Begins setup of time-based one-time password (TOTP) multi-factor authentication (MFA) for a user, with a unique private key that Amazon Cognito generates and returns in the API response. 
You can authorize an AssociateSoftwareToken request with either the user's access token, or a session string from a challenge response that you received from Amazon Cognito. Amazon Cognito disassociates an existing software token when you verify the new token in a VerifySoftwareToken API request. If you don't verify the software token and your user pool doesn't require MFA, the user can then authenticate with user name and password credentials alone. If your user pool requires TOTP MFA, Amazon Cognito generates an MFA_SETUP or SOFTWARE_TOKEN_SETUP challenge each time your user signs in. Complete setup with AssociateSoftwareToken and VerifySoftwareToken. After you set up software token MFA for your user, Amazon Cognito generates a SOFTWARE_TOKEN_MFA challenge when they authenticate. Respond to this challenge with your user's TOTP. Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints. Authorize this action with a signed-in user's access token. It must include the scope aws.cognito.signin.user.admin. /// /// Parameters: - /// - accessToken: A valid access token that Amazon Cognito issued to the user whose software token you want to generate. - /// - session: The session that should be passed both ways in challenge-response calls to the service. This allows authentication of the user as part of the MFA setup process. + /// - accessToken: A valid access token that Amazon Cognito issued to the user whose software token you want to generate. You can provide either an access token or a session ID in the request. + /// - session: The session identifier that maintains the state of authentication requests and challenge responses. 
In AssociateSoftwareToken, this is the session ID from a successful sign-in. You can provide either an access token or a session ID in the request. /// - logger: Logger use during operation @inlinable public func associateSoftwareToken( @@ -1141,7 +1141,7 @@ public struct CognitoIdentityProvider: AWSService { /// Parameters: /// - accessToken: A valid access token that Amazon Cognito issued to the user whose password you want to change. /// - previousPassword: The user's previous password. Required if the user has a password. If the user has no password and only signs in with passwordless authentication options, you can omit this parameter. - /// - proposedPassword: The new password. + /// - proposedPassword: A new password that you prompted the user to enter in your application. /// - logger: Logger use during operation @inlinable public func changePassword( @@ -1174,7 +1174,7 @@ public struct CognitoIdentityProvider: AWSService { /// Completes registration of a passkey authenticator for the current user. Your application provides data from a successful registration request with the data from the output of a StartWebAuthnRegistration. Authorize this action with a signed-in user's access token. It must include the scope aws.cognito.signin.user.admin. /// /// Parameters: - /// - accessToken: A valid access token that Amazon Cognito issued to the user whose passkey registration you want to verify. + /// - accessToken: A valid access token that Amazon Cognito issued to the user whose passkey registration you want to complete. /// - credential: A RegistrationResponseJSON public-key credential response from the user's passkey provider. /// - logger: Logger use during operation @inlinable @@ -1190,7 +1190,7 @@ public struct CognitoIdentityProvider: AWSService { return try await self.completeWebAuthnRegistration(input, logger: logger) } - /// Confirms tracking of the device. This API call is the call that begins device tracking. 
For more information about device authentication, see Working with user devices in your user pool. Authorize this action with a signed-in user's access token. It must include the scope aws.cognito.signin.user.admin. Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints. + /// Confirms a device that a user wants to remember. A remembered device is a "Remember me on this device" option for user pools that perform authentication with the device key of a trusted device in the back end, instead of a user-provided MFA code. For more information about device authentication, see Working with user devices in your user pool. Authorize this action with a signed-in user's access token. It must include the scope aws.cognito.signin.user.admin. Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints. @Sendable @inlinable public func confirmDevice(_ input: ConfirmDeviceRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ConfirmDeviceResponse { @@ -1203,12 +1203,12 @@ public struct CognitoIdentityProvider: AWSService { logger: logger ) } - /// Confirms tracking of the device. This API call is the call that begins device tracking. For more information about device authentication, see Working with user devices in your user pool. Authorize this action with a signed-in user's access token. It must include the scope aws.cognito.signin.user.admin. 
Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints. + /// Confirms a device that a user wants to remember. A remembered device is a "Remember me on this device" option for user pools that perform authentication with the device key of a trusted device in the back end, instead of a user-provided MFA code. For more information about device authentication, see Working with user devices in your user pool. Authorize this action with a signed-in user's access token. It must include the scope aws.cognito.signin.user.admin. Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints. /// /// Parameters: /// - accessToken: A valid access token that Amazon Cognito issued to the user whose device you want to confirm. - /// - deviceKey: The device key. - /// - deviceName: The device name. + /// - deviceKey: The unique identifier, or device key, of the device that you want to update the status for. + /// - deviceName: A friendly name for the device, for example MyMobilePhone. /// - deviceSecretVerifierConfig: The configuration of the device secret verifier. /// - logger: Logger use during operation @inlinable @@ -1228,7 +1228,7 @@ public struct CognitoIdentityProvider: AWSService { return try await self.confirmDevice(input, logger: logger) } - /// Allows a user to enter a confirmation code to reset a forgotten password. 
Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints. + /// This public API operation accepts a confirmation code that Amazon Cognito sent to a user and accepts a new password for that user. Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints. @Sendable @inlinable public func confirmForgotPassword(_ input: ConfirmForgotPasswordRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ConfirmForgotPasswordResponse { @@ -1241,13 +1241,13 @@ public struct CognitoIdentityProvider: AWSService { logger: logger ) } - /// Allows a user to enter a confirmation code to reset a forgotten password. Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints. + /// This public API operation accepts a confirmation code that Amazon Cognito sent to a user and accepts a new password for that user. Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. 
For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints. /// /// Parameters: /// - analyticsMetadata: The Amazon Pinpoint analytics metadata for collecting metrics for ConfirmForgotPassword calls. - /// - clientId: The app client ID of the app associated with the user pool. + /// - clientId: The ID of the app client where the user wants to reset their password. This parameter is an identifier of the client application that users are resetting their password from, but this operation resets users' passwords for all app clients in the user pool. /// - clientMetadata: A map of custom key-value pairs that you can provide as input for any custom workflows that this action triggers. You create custom workflows by assigning Lambda functions to user pool triggers. When you use the ConfirmForgotPassword API action, Amazon Cognito invokes the function that is assigned to the post confirmation trigger. When Amazon Cognito invokes this function, it passes a JSON payload, which the function receives as input. This payload contains a clientMetadata attribute, which provides the data that you assigned to the ClientMetadata parameter in your ConfirmForgotPassword request. In your function code in Lambda, you can process the clientMetadata value to enhance your workflow for your specific needs. For more information, see - /// - confirmationCode: The confirmation code from your user's request to reset their password. For more information, see ForgotPassword. + /// - confirmationCode: The confirmation code that your user pool sent in response to an AdminResetUserPassword or a ForgotPassword request. /// - password: The new password that your user wants to set. /// - secretHash: A keyed-hash message authentication code (HMAC) calculated using the secret key of a user pool client and username plus the client ID in the message. For more information about SecretHash, see Computing secret hash values. 
/// - userContextData: Contextual data about your user session, such as the device fingerprint, IP address, or location. Amazon Cognito advanced @@ -1278,7 +1278,7 @@ public struct CognitoIdentityProvider: AWSService { return try await self.confirmForgotPassword(input, logger: logger) } - /// This public API operation provides a code that Amazon Cognito sent to your user when they signed up in your user pool via the SignUp API operation. After your user enters their code, they confirm ownership of the email address or phone number that they provided, and their user account becomes active. Depending on your user pool configuration, your users will receive their confirmation code in an email or SMS message. Local users who signed up in your user pool are the only type of user who can confirm sign-up with a code. Users who federate through an external identity provider (IdP) have already been confirmed by their IdP. Administrator-created users, users created with the AdminCreateUser API operation, confirm their accounts when they respond to their invitation email message and choose a password. They do not receive a confirmation code. Instead, they receive a temporary password. Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints. + /// This public API operation submits a code that Amazon Cognito sent to your user when they signed up in your user pool via the SignUp API operation. After your user enters their code, they confirm ownership of the email address or phone number that they provided, and their user account becomes active. Depending on your user pool configuration, your users will receive their confirmation code in an email or SMS message. 
Local users who signed up in your user pool are the only type of user who can confirm sign-up with a code. Users who federate through an external identity provider (IdP) have already been confirmed by their IdP. Administrator-created users, users created with the AdminCreateUser API operation, confirm their accounts when they respond to their invitation email message and choose a password. They do not receive a confirmation code. Instead, they receive a temporary password. Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints. @Sendable @inlinable public func confirmSignUp(_ input: ConfirmSignUpRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ConfirmSignUpResponse { @@ -1291,15 +1291,15 @@ public struct CognitoIdentityProvider: AWSService { logger: logger ) } - /// This public API operation provides a code that Amazon Cognito sent to your user when they signed up in your user pool via the SignUp API operation. After your user enters their code, they confirm ownership of the email address or phone number that they provided, and their user account becomes active. Depending on your user pool configuration, your users will receive their confirmation code in an email or SMS message. Local users who signed up in your user pool are the only type of user who can confirm sign-up with a code. Users who federate through an external identity provider (IdP) have already been confirmed by their IdP. Administrator-created users, users created with the AdminCreateUser API operation, confirm their accounts when they respond to their invitation email message and choose a password. They do not receive a confirmation code. 
Instead, they receive a temporary password. Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints. + /// This public API operation submits a code that Amazon Cognito sent to your user when they signed up in your user pool via the SignUp API operation. After your user enters their code, they confirm ownership of the email address or phone number that they provided, and their user account becomes active. Depending on your user pool configuration, your users will receive their confirmation code in an email or SMS message. Local users who signed up in your user pool are the only type of user who can confirm sign-up with a code. Users who federate through an external identity provider (IdP) have already been confirmed by their IdP. Administrator-created users, users created with the AdminCreateUser API operation, confirm their accounts when they respond to their invitation email message and choose a password. They do not receive a confirmation code. Instead, they receive a temporary password. Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints. /// /// Parameters: /// - analyticsMetadata: The Amazon Pinpoint analytics metadata for collecting metrics for ConfirmSignUp calls. /// - clientId: The ID of the app client associated with the user pool. 
/// - clientMetadata: A map of custom key-value pairs that you can provide as input for any custom workflows that this action triggers. You create custom workflows by assigning Lambda functions to user pool triggers. When you use the ConfirmSignUp API action, Amazon Cognito invokes the function that is assigned to the post confirmation trigger. When Amazon Cognito invokes this function, it passes a JSON payload, which the function receives as input. This payload contains a clientMetadata attribute, which provides the data that you assigned to the ClientMetadata parameter in your ConfirmSignUp request. In your function code in Lambda, you can process the clientMetadata value to enhance your workflow for your specific needs. For more information, see - /// - confirmationCode: The confirmation code sent by a user's request to confirm registration. - /// - forceAliasCreation: Boolean to be specified to force user confirmation irrespective of existing alias. By default set to False. If this parameter is set to True and the phone number/email used for sign up confirmation already exists as an alias with a different user, the API call will migrate the alias from the previous user to the newly created user being confirmed. If set to False, the API will throw an AliasExistsException error. - /// - secretHash: A keyed-hash message authentication code (HMAC) calculated using the secret key of a user pool client and username plus the client ID in the message. + /// - confirmationCode: The confirmation code that your user pool sent in response to the SignUp request. + /// - forceAliasCreation: When true, forces user confirmation despite any existing aliases. Defaults to false. A value of true migrates the alias from an existing user to the new user if an existing user already has the phone number or email address as an alias. Say, for example, that an existing user has an email attribute of bob@example.com and email is an alias in your user pool. 
If the new user also has an email of bob@example.com and your ConfirmSignUp response sets ForceAliasCreation to true, the new user can sign in with a username of bob@example.com and the existing user can no longer do so. If false and an attribute belongs to an existing alias, this request returns an AliasExistsException error. For more information about sign-in aliases, see Customizing sign-in attributes. + /// - secretHash: A keyed-hash message authentication code (HMAC) calculated using the secret key of a user pool client and username plus the client ID in the message. For more information about SecretHash, see Computing secret hash values. /// - session: The optional session ID from a SignUp API request. You can sign in a user directly from the sign-up process with the USER_AUTH authentication flow. /// - userContextData: Contextual data about your user session, such as the device fingerprint, IP address, or location. Amazon Cognito advanced /// - username: The username of the user that you want to query or modify. The value of this parameter is typically your user's username, but it can be any of their alias attributes. If username isn't an alias attribute in your user pool, this value must be the sub of a local user or the username of a user from a third-party IdP. @@ -1331,7 +1331,7 @@ public struct CognitoIdentityProvider: AWSService { return try await self.confirmSignUp(input, logger: logger) } - /// Creates a new group in the specified user pool. Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy. Learn more Signing Amazon Web Services API Requests Using the Amazon Cognito user pools API and user pool endpoints + /// Creates a new group in the specified user pool. For more information about user pool groups see Adding groups to a user pool. 
Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy. Learn more Signing Amazon Web Services API Requests Using the Amazon Cognito user pools API and user pool endpoints @Sendable @inlinable public func createGroup(_ input: CreateGroupRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateGroupResponse { @@ -1344,14 +1344,14 @@ public struct CognitoIdentityProvider: AWSService { logger: logger ) } - /// Creates a new group in the specified user pool. Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy. Learn more Signing Amazon Web Services API Requests Using the Amazon Cognito user pools API and user pool endpoints + /// Creates a new group in the specified user pool. For more information about user pool groups see Adding groups to a user pool. Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy. Learn more Signing Amazon Web Services API Requests Using the Amazon Cognito user pools API and user pool endpoints /// /// Parameters: - /// - description: A string containing the description of the group. - /// - groupName: The name of the group. Must be unique. + /// - description: A description of the group that you're creating. + /// - groupName: A name for the group. This name must be unique in your user pool. 
/// - precedence: A non-negative integer value that specifies the precedence of this group relative to the other groups that a user can belong to in the user pool. Zero is the highest precedence value. Groups with lower Precedence values take precedence over groups with higher or null Precedence values. If a user belongs to two or more groups, it is the group with the lowest precedence value whose role ARN is given in the user's tokens for the cognito:roles and cognito:preferred_role claims. Two groups can have the same Precedence value. If this happens, neither group takes precedence over the other. If two groups with the same Precedence have the same role ARN, that role is used in the cognito:preferred_role claim in tokens for users in each group. If the two groups have different role ARNs, the cognito:preferred_role claim isn't set in users' tokens. The default Precedence value is null. The maximum Precedence value is 2^31-1. - /// - roleArn: The role Amazon Resource Name (ARN) for the group. - /// - userPoolId: The user pool ID for the user pool. + /// - roleArn: The Amazon Resource Name (ARN) for the IAM role that you want to associate with the group. A group role primarily declares a preferred role for the credentials that you get from an identity pool. Amazon Cognito ID tokens have a cognito:preferred_role claim that presents the highest-precedence group that a user belongs to. Both ID and access tokens also contain a cognito:groups claim that list all the groups that a user is a member of. + /// - userPoolId: The ID of the user pool where you want to create a user group. /// - logger: Logger use during operation @inlinable public func createGroup( @@ -1372,7 +1372,7 @@ public struct CognitoIdentityProvider: AWSService { return try await self.createGroup(input, logger: logger) } - /// Adds a configuration and trust relationship between a third-party identity provider (IdP) and a user pool. 
Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy. Learn more Signing Amazon Web Services API Requests Using the Amazon Cognito user pools API and user pool endpoints + /// Adds a configuration and trust relationship between a third-party identity provider (IdP) and a user pool. Amazon Cognito accepts sign-in with third-party identity providers through managed login and OIDC relying-party libraries. For more information, see Third-party IdP sign-in. Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy. Learn more Signing Amazon Web Services API Requests Using the Amazon Cognito user pools API and user pool endpoints @Sendable @inlinable public func createIdentityProvider(_ input: CreateIdentityProviderRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateIdentityProviderResponse { @@ -1385,15 +1385,15 @@ public struct CognitoIdentityProvider: AWSService { logger: logger ) } - /// Adds a configuration and trust relationship between a third-party identity provider (IdP) and a user pool. Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy. Learn more Signing Amazon Web Services API Requests Using the Amazon Cognito user pools API and user pool endpoints + /// Adds a configuration and trust relationship between a third-party identity provider (IdP) and a user pool. 
Amazon Cognito accepts sign-in with third-party identity providers through managed login and OIDC relying-party libraries. For more information, see Third-party IdP sign-in. Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy. Learn more Signing Amazon Web Services API Requests Using the Amazon Cognito user pools API and user pool endpoints /// /// Parameters: - /// - attributeMapping: A mapping of IdP attributes to standard and custom user pool attributes. - /// - idpIdentifiers: A list of IdP identifiers. + /// - attributeMapping: A mapping of IdP attributes to standard and custom user pool attributes. Specify a user pool attribute as the key of the key-value pair, and the IdP attribute claim name as the value. + /// - idpIdentifiers: An array of IdP identifiers, for example "IdPIdentifiers": [ "MyIdP", "MyIdP2" ]. Identifiers are friendly names that you can pass in the idp_identifier query parameter of requests to the Authorize endpoint to silently redirect to sign-in with the associated IdP. Identifiers in a domain format also enable the use of email-address matching with SAML providers. /// - providerDetails: The scopes, URLs, and identifiers for your external identity provider. The following - /// - providerName: The IdP name. - /// - providerType: The IdP type. - /// - userPoolId: The user pool ID. + /// - providerName: The name that you want to assign to the IdP. You can pass the identity provider name in the identity_provider query parameter of requests to the Authorize endpoint to silently redirect to sign-in with the associated IdP. + /// - providerType: The type of IdP that you want to add. Amazon Cognito supports OIDC, SAML 2.0, Login With Amazon, Sign In With Apple, Google, and Facebook IdPs. 
+ /// - userPoolId: The Id of the user pool where you want to create an IdP. /// - logger: Logger use during operation @inlinable public func createIdentityProvider( @@ -1416,7 +1416,7 @@ public struct CognitoIdentityProvider: AWSService { return try await self.createIdentityProvider(input, logger: logger) } - /// Creates a new set of branding settings for a user pool style and associates it with an app client. This operation is the programmatic option for the creation of a new style in the branding designer. Provides values for UI customization in a Settings JSON object and image files in an Assets array. To send the JSON object Document type parameter in Settings, you might need to update to the most recent version of your Amazon Web Services SDK. This operation has a 2-megabyte request-size limit and include the CSS settings and image assets for your app client. Your branding settings might exceed 2MB in size. Amazon Cognito doesn't require that you pass all parameters in one request and preserves existing style settings that you don't specify. If your request is larger than 2MB, separate it into multiple requests, each with a size smaller than the limit. For more information, see API and SDK operations for managed login branding Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy. Learn more Signing Amazon Web Services API Requests Using the Amazon Cognito user pools API and user pool endpoints + /// Creates a new set of branding settings for a user pool style and associates it with an app client. This operation is the programmatic option for the creation of a new style in the branding designer. Provides values for UI customization in a Settings JSON object and image files in an Assets array. 
To send the JSON object Document type parameter in Settings, you might need to update to the most recent version of your Amazon Web Services SDK. To create a new style with default settings, set UseCognitoProvidedValues to true and don't provide values for any other options. This operation has a 2-megabyte request-size limit and include the CSS settings and image assets for your app client. Your branding settings might exceed 2MB in size. Amazon Cognito doesn't require that you pass all parameters in one request and preserves existing style settings that you don't specify. If your request is larger than 2MB, separate it into multiple requests, each with a size smaller than the limit. As a best practice, modify the output of DescribeManagedLoginBrandingByClient into the request parameters for this operation. To get all settings, set ReturnMergedResources to true. For more information, see API and SDK operations for managed login branding. Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy. Learn more Signing Amazon Web Services API Requests Using the Amazon Cognito user pools API and user pool endpoints @Sendable @inlinable public func createManagedLoginBranding(_ input: CreateManagedLoginBrandingRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateManagedLoginBrandingResponse { @@ -1429,13 +1429,13 @@ public struct CognitoIdentityProvider: AWSService { logger: logger ) } - /// Creates a new set of branding settings for a user pool style and associates it with an app client. This operation is the programmatic option for the creation of a new style in the branding designer. Provides values for UI customization in a Settings JSON object and image files in an Assets array. 
To send the JSON object Document type parameter in Settings, you might need to update to the most recent version of your Amazon Web Services SDK. This operation has a 2-megabyte request-size limit and include the CSS settings and image assets for your app client. Your branding settings might exceed 2MB in size. Amazon Cognito doesn't require that you pass all parameters in one request and preserves existing style settings that you don't specify. If your request is larger than 2MB, separate it into multiple requests, each with a size smaller than the limit. For more information, see API and SDK operations for managed login branding Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy. Learn more Signing Amazon Web Services API Requests Using the Amazon Cognito user pools API and user pool endpoints + /// Creates a new set of branding settings for a user pool style and associates it with an app client. This operation is the programmatic option for the creation of a new style in the branding designer. Provides values for UI customization in a Settings JSON object and image files in an Assets array. To send the JSON object Document type parameter in Settings, you might need to update to the most recent version of your Amazon Web Services SDK. To create a new style with default settings, set UseCognitoProvidedValues to true and don't provide values for any other options. This operation has a 2-megabyte request-size limit and include the CSS settings and image assets for your app client. Your branding settings might exceed 2MB in size. Amazon Cognito doesn't require that you pass all parameters in one request and preserves existing style settings that you don't specify. 
If your request is larger than 2MB, separate it into multiple requests, each with a size smaller than the limit. As a best practice, modify the output of DescribeManagedLoginBrandingByClient into the request parameters for this operation. To get all settings, set ReturnMergedResources to true. For more information, see API and SDK operations for managed login branding. Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy. Learn more Signing Amazon Web Services API Requests Using the Amazon Cognito user pools API and user pool endpoints /// /// Parameters: /// - assets: An array of image files that you want to apply to roles like backgrounds, logos, and icons. Each object must also indicate whether it is for dark mode, light mode, or browser-adaptive mode. /// - clientId: The app client that you want to create the branding style for. Each style is permanently linked to an app client. To change the style for an app client, delete the existing style with DeleteManagedLoginBranding and create a new one. /// - settings: A JSON file, encoded as a Document type, with the settings that you want to apply to your style. - /// - useCognitoProvidedValues: When true, applies the default branding style options. This option reverts to default style options that are managed by Amazon Cognito. You can modify them later in the branding designer. When you specify true for this option, you must also omit values for Settings and Assets in the request. + /// - useCognitoProvidedValues: When true, applies the default branding style options. These default options are managed by Amazon Cognito. You can modify them later in the branding designer. When you specify true for this option, you must also omit values for Settings and Assets in the request.
/// - userPoolId: The ID of the user pool where you want to create a new branding style. /// - logger: Logger use during operation @inlinable @@ -1457,7 +1457,7 @@ public struct CognitoIdentityProvider: AWSService { return try await self.createManagedLoginBranding(input, logger: logger) } - /// Creates a new OAuth2.0 resource server and defines custom scopes within it. Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy. Learn more Signing Amazon Web Services API Requests Using the Amazon Cognito user pools API and user pool endpoints + /// Creates a new OAuth2.0 resource server and defines custom scopes within it. Resource servers are associated with custom scopes and machine-to-machine (M2M) authorization. For more information, see Access control with resource servers. Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy. Learn more Signing Amazon Web Services API Requests Using the Amazon Cognito user pools API and user pool endpoints @Sendable @inlinable public func createResourceServer(_ input: CreateResourceServerRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateResourceServerResponse { @@ -1470,13 +1470,13 @@ public struct CognitoIdentityProvider: AWSService { logger: logger ) } - /// Creates a new OAuth2.0 resource server and defines custom scopes within it. Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy. 
Learn more Signing Amazon Web Services API Requests Using the Amazon Cognito user pools API and user pool endpoints + /// Creates a new OAuth2.0 resource server and defines custom scopes within it. Resource servers are associated with custom scopes and machine-to-machine (M2M) authorization. For more information, see Access control with resource servers. Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy. Learn more Signing Amazon Web Services API Requests Using the Amazon Cognito user pools API and user pool endpoints /// /// Parameters: /// - identifier: A unique resource server identifier for the resource server. The identifier can be an API friendly name like solar-system-data. You can also set an API URL like https://solar-system-data-api.example.com as your identifier. Amazon Cognito represents scopes in the access token in the format $resource-server-identifier/$scope. Longer scope-identifier strings increase the size of your access tokens. /// - name: A friendly name for the resource server. - /// - scopes: A list of scopes. Each scope is a key-value map with the keys name and description. - /// - userPoolId: The user pool ID for the user pool. + /// - scopes: A list of custom scopes. Each scope is a key-value map with the keys ScopeName and ScopeDescription. The name of a custom scope is a combination of ScopeName and the resource server Name in this request, for example MyResourceServerName/MyScopeName. + /// - userPoolId: The ID of the user pool where you want to create a resource server. /// - logger: Logger use during operation @inlinable public func createResourceServer( @@ -1495,7 +1495,7 @@ public struct CognitoIdentityProvider: AWSService { return try await self.createResourceServer(input, logger: logger) } - /// Creates a user import job. 
Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy. Learn more Signing Amazon Web Services API Requests Using the Amazon Cognito user pools API and user pool endpoints + /// Creates a user import job. You can import users into user pools from a comma-separated values (CSV) file without adding Amazon Cognito MAU costs to your Amazon Web Services bill. To generate a template for your import, see GetCSVHeader. To learn more about CSV import, see Importing users from a CSV file. Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy. Learn more Signing Amazon Web Services API Requests Using the Amazon Cognito user pools API and user pool endpoints @Sendable @inlinable public func createUserImportJob(_ input: CreateUserImportJobRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateUserImportJobResponse { @@ -1508,12 +1508,12 @@ public struct CognitoIdentityProvider: AWSService { logger: logger ) } - /// Creates a user import job. Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy. Learn more Signing Amazon Web Services API Requests Using the Amazon Cognito user pools API and user pool endpoints + /// Creates a user import job. You can import users into user pools from a comma-separated values (CSV) file without adding Amazon Cognito MAU costs to your Amazon Web Services bill. To generate a template for your import, see GetCSVHeader. 
To learn more about CSV import, see Importing users from a CSV file. Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy. Learn more Signing Amazon Web Services API Requests Using the Amazon Cognito user pools API and user pool endpoints /// /// Parameters: - /// - cloudWatchLogsRoleArn: The role ARN for the Amazon CloudWatch Logs Logging role for the user import job. - /// - jobName: The job name for the user import job. - /// - userPoolId: The user pool ID for the user pool that the users are being imported into. + /// - cloudWatchLogsRoleArn: You must specify an IAM role that has permission to log import-job results to Amazon CloudWatch Logs. This parameter is the ARN of that role. + /// - jobName: A friendly name for the user import job. + /// - userPoolId: The ID of the user pool that you want to import users into. /// - logger: Logger use during operation @inlinable public func createUserImportJob( @@ -1530,7 +1530,7 @@ public struct CognitoIdentityProvider: AWSService { return try await self.createUserImportJob(input, logger: logger) } - /// This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in. If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Services service, Amazon Simple Notification Service might place your account in the SMS sandbox. 
In sandbox mode , you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide. Creates a new Amazon Cognito user pool and sets the password policy for the pool. If you don't provide a value for an attribute, Amazon Cognito sets it to its default value. Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy. Learn more Signing Amazon Web Services API Requests Using the Amazon Cognito user pools API and user pool endpoints + /// This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in. If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Services service, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode , you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide. Creates a new Amazon Cognito user pool. This operation sets basic and advanced configuration options. 
You can create a user pool in the Amazon Cognito console to your preferences and use the output of DescribeUserPool to generate requests from that baseline. If you don't provide a value for an attribute, Amazon Cognito sets it to its default value. Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy. Learn more Signing Amazon Web Services API Requests Using the Amazon Cognito user pools API and user pool endpoints @Sendable @inlinable public func createUserPool(_ input: CreateUserPoolRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateUserPoolResponse { @@ -1543,29 +1543,29 @@ public struct CognitoIdentityProvider: AWSService { logger: logger ) } - /// This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in. If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Services service, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode , you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide. Creates a new Amazon Cognito user pool and sets the password policy for the pool. 
If you don't provide a value for an attribute, Amazon Cognito sets it to its default value. Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy. Learn more Signing Amazon Web Services API Requests Using the Amazon Cognito user pools API and user pool endpoints + /// This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in. If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Services service, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode , you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide. Creates a new Amazon Cognito user pool. This operation sets basic and advanced configuration options. You can create a user pool in the Amazon Cognito console to your preferences and use the output of DescribeUserPool to generate requests from that baseline. If you don't provide a value for an attribute, Amazon Cognito sets it to its default value. Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. 
For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy. Learn more Signing Amazon Web Services API Requests Using the Amazon Cognito user pools API and user pool endpoints /// /// Parameters: /// - accountRecoverySetting: The available verified method a user can use to recover their password when they call ForgotPassword. You can use this setting to define a preferred method when a user has more than one method available. With this setting, SMS doesn't qualify for a valid password recovery mechanism if the user also has SMS multi-factor authentication (MFA) activated. In the absence of this setting, Amazon Cognito uses the legacy behavior to determine the recovery method where SMS is preferred through email. - /// - adminCreateUserConfig: The configuration for AdminCreateUser requests. - /// - aliasAttributes: Attributes supported as an alias for this user pool. Possible values: phone_number, email, or preferred_username. - /// - autoVerifiedAttributes: The attributes to be auto-verified. Possible values: email, phone_number. + /// - adminCreateUserConfig: The configuration for AdminCreateUser requests. Includes the template for the invitation message for new users, the duration of temporary passwords, and permitting self-service sign-up. + /// - aliasAttributes: Attributes supported as an alias for this user pool. Possible values: phone_number, email, or preferred_username. For more information about alias attributes, see Customizing sign-in attributes. + /// - autoVerifiedAttributes: The attributes that you want your user pool to automatically verify. Possible values: email, phone_number. For more information see Verifying contact information at sign-up. /// - deletionProtection: When active, DeletionProtection prevents accidental deletion of your user - /// - deviceConfiguration: The device-remembering configuration for a user pool. 
A null value indicates that you have deactivated device remembering in your user pool. When you provide a value for any DeviceConfiguration field, you activate the Amazon Cognito device-remembering feature. + /// - deviceConfiguration: The device-remembering configuration for a user pool. Device remembering or device tracking is a "Remember me on this device" option for user pools that perform authentication with the device key of a trusted device in the back end, instead of a user-provided MFA code. For more information about device authentication, see Working with user devices in your user pool. A null value indicates that you have deactivated device remembering in your user pool. When you provide a value for any DeviceConfiguration field, you activate the Amazon Cognito device-remembering feature. For more infor /// - emailConfiguration: The email configuration of your user pool. The email configuration type sets your preferred sending method, Amazon Web Services Region, and sender for messages from your user pool. /// - emailVerificationMessage: This parameter is no longer used. See VerificationMessageTemplateType. /// - emailVerificationSubject: This parameter is no longer used. See VerificationMessageTemplateType. /// - lambdaConfig: A collection of user pool Lambda triggers. Amazon Cognito invokes triggers at several possible stages of authentication operations. Triggers can modify the outcome of the operations that invoked them. - /// - mfaConfiguration: Specifies MFA configuration details. - /// - policies: The policies associated with the new user pool. - /// - poolName: A string used to name the user pool. - /// - schema: An array of schema attributes for the new user pool. These attributes can be standard or custom attributes. + /// - mfaConfiguration: Sets multi-factor authentication (MFA) to be on, off, or optional. When ON, all users must set up MFA before they can sign in. 
When OPTIONAL, your application must make a client-side determination of whether a user wants to register an MFA device. For user pools with adaptive authentication with threat protection, choose OPTIONAL. + /// - policies: The password policy and sign-in policy in the user pool. The password policy sets options like password complexity requirements and password history. The sign-in policy sets the options available to applications in choice-based authentication. + /// - poolName: A friendly name for your user pool. + /// - schema: An array of attributes for the new user pool. You can add custom attributes and modify the properties of default attributes. The specifications in this parameter set the required attributes in your user pool. For more information, see Working with user attributes. /// - smsAuthenticationMessage: A string representing the SMS authentication message. - /// - smsConfiguration: The SMS configuration with the settings that your Amazon Cognito user pool must use to send an SMS message from your Amazon Web Services account through Amazon Simple Notification Service. To send SMS messages with Amazon SNS in the Amazon Web Services Region that you want, the Amazon Cognito user pool uses an Identity and Access Management (IAM) role in your Amazon Web Services account. + /// - smsConfiguration: The SMS configuration with the settings that your Amazon Cognito user pool must use to send an SMS message from your Amazon Web Services account through Amazon Simple Notification Service. To send SMS messages with Amazon SNS in the Amazon Web Services Region that you want, the Amazon Cognito user pool uses an Identity and Access Management (IAM) role in your Amazon Web Services account. For more information, see SMS message settings. /// - smsVerificationMessage: This parameter is no longer used. See VerificationMessageTemplateType. /// - userAttributeUpdateSettings: The settings for updates to user attributes.
These settings include the property AttributesRequireVerificationBeforeUpdate, - /// - usernameAttributes: Specifies whether a user can use an email address or phone number as a username when they sign up. - /// - usernameConfiguration: Case sensitivity on the username input for the selected sign-in option. When case sensitivity is set to False (case insensitive), users can sign in with any combination of capital and lowercase letters. For example, username, USERNAME, or UserName, or for email, email@example.com or EMaiL@eXamplE.Com. For most use cases, set case sensitivity to False (case insensitive) as a best practice. When usernames and email addresses are case insensitive, Amazon Cognito treats any variation in case as the same user, and prevents a case variation from being assigned to the same attribute for a different user. This configuration is immutable after you set it. For more information, see UsernameConfigurationType. + /// - usernameAttributes: Specifies whether a user can use an email address or phone number as a username when they sign up. For more information, see Customizing sign-in attributes. + /// - usernameConfiguration: Sets the case sensitivity option for sign-in usernames. When CaseSensitive is false (case insensitive), users can sign in with any combination of capital and lowercase letters. For example, username, USERNAME, or UserName, or for email, email@example.com or EMaiL@eXamplE.Com. For most use cases, set case sensitivity to false as a best practice. When usernames and email addresses are case insensitive, Amazon Cognito treats any variation in case as the same user, and prevents a case variation from being assigned to the same attribute for a different user. When CaseSensitive is true (case sensitive), Amazon Cognito interprets USERNAME and UserName as distinct users. This configuration is immutable after you set it. /// - userPoolAddOns: User pool add-ons. Contains settings for activation of advanced security features. 
To log user security information but take no action, set to AUDIT. To configure automatic security responses to risky traffic to your user pool, set to ENFORCED. For more information, see Adding advanced security to a user pool. /// - userPoolTags: The tag keys and values to assign to the user pool. A tag is a label that you can use to categorize and manage user pools in different ways, such as by purpose, owner, environment, or other criteria. /// - userPoolTier: The user pool feature plan, or tier. This parameter determines the eligibility of the user pool for features like managed login, access-token customization, and threat protection. Defaults to ESSENTIALS. @@ -1628,7 +1628,7 @@ public struct CognitoIdentityProvider: AWSService { return try await self.createUserPool(input, logger: logger) } - /// Creates the user pool client. When you create a new user pool client, token revocation is automatically activated. For more information about revoking tokens, see RevokeToken. If you don't provide a value for an attribute, Amazon Cognito sets it to its default value. Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy. Learn more Signing Amazon Web Services API Requests Using the Amazon Cognito user pools API and user pool endpoints + /// Creates an app client in a user pool. This operation sets basic and advanced configuration options. You can create an app client in the Amazon Cognito console to your preferences and use the output of DescribeUserPoolClient to generate requests from that baseline. New app clients activate token revocation by default. For more information about revoking tokens, see RevokeToken. If you don't provide a value for an attribute, Amazon Cognito sets it to its default value. 
Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy. Learn more Signing Amazon Web Services API Requests Using the Amazon Cognito user pools API and user pool endpoints @Sendable @inlinable public func createUserPoolClient(_ input: CreateUserPoolClientRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateUserPoolClientResponse { @@ -1641,30 +1641,30 @@ public struct CognitoIdentityProvider: AWSService { logger: logger ) } - /// Creates the user pool client. When you create a new user pool client, token revocation is automatically activated. For more information about revoking tokens, see RevokeToken. If you don't provide a value for an attribute, Amazon Cognito sets it to its default value. Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy. Learn more Signing Amazon Web Services API Requests Using the Amazon Cognito user pools API and user pool endpoints + /// Creates an app client in a user pool. This operation sets basic and advanced configuration options. You can create an app client in the Amazon Cognito console to your preferences and use the output of DescribeUserPoolClient to generate requests from that baseline. New app clients activate token revocation by default. For more information about revoking tokens, see RevokeToken. If you don't provide a value for an attribute, Amazon Cognito sets it to its default value. Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. 
For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy. Learn more Signing Amazon Web Services API Requests Using the Amazon Cognito user pools API and user pool endpoints /// /// Parameters: /// - accessTokenValidity: The access token time limit. After this limit expires, your user can't use /// - allowedOAuthFlows: The OAuth grant types that you want your app client to generate. To create an app client that generates client credentials grants, you must add client_credentials as the only allowed OAuth flow. code Use a code grant flow, which provides an authorization code as the response. This code can be exchanged for access tokens with the /oauth2/token endpoint. implicit Issue the access token (and, optionally, ID token, based on scopes) directly to your user. client_credentials Issue the access token from the /oauth2/token endpoint directly to a non-person user using a combination of the client ID and client secret. /// - allowedOAuthFlowsUserPoolClient: Set to true to use OAuth 2.0 features in your user pool app client. AllowedOAuthFlowsUserPoolClient must be true before you can configure - /// - allowedOAuthScopes: The allowed OAuth scopes. Possible values provided by OAuth are phone, email, openid, and profile. Possible values provided by Amazon Web Services are aws.cognito.signin.user.admin. Custom scopes created in Resource Servers are also supported. - /// - analyticsConfiguration: The user pool analytics configuration for collecting metrics and sending them to your Amazon Pinpoint campaign. In Amazon Web Services Regions where Amazon Pinpoint isn't available, user pools only support sending events to Amazon Pinpoint projects in Amazon Web Services Region us-east-1. In Regions where Amazon Pinpoint is available, user pools support sending events to Amazon Pinpoint projects within that same Region. 
+ /// - allowedOAuthScopes: The OAuth 2.0 scopes that you want to permit your app client to authorize. Scopes govern access control to user pool self-service API operations, user data from the userInfo endpoint, and third-party APIs. Possible values provided by OAuth are phone, email, openid, and profile. Possible values provided by Amazon Web Services are aws.cognito.signin.user.admin. Custom scopes created in Resource Servers are also supported. + /// - analyticsConfiguration: The user pool analytics configuration for collecting metrics and sending them to your Amazon Pinpoint campaign. In Amazon Web Services Regions where Amazon Pinpoint isn't available, user pools might not have access to analytics or might be configurable with campaigns in the US East (N. Virginia) Region. For more information, see Using Amazon Pinpoint analytics. /// - authSessionValidity: Amazon Cognito creates a session token for each API request in an authentication flow. AuthSessionValidity is the duration, - /// - callbackURLs: A list of allowed redirect (callback) URLs for the IdPs. A redirect URI must: Be an absolute URI. Be registered with the authorization server. Not include a fragment component. See OAuth 2.0 - Redirection Endpoint. Amazon Cognito requires HTTPS over HTTP except for http://localhost for testing purposes only. App callback URLs such as myapp://example are also supported. - /// - clientName: The client name for the user pool client you would like to create. - /// - defaultRedirectURI: The default redirect URI. In app clients with one assigned IdP, replaces redirect_uri in authentication requests. Must be in the CallbackURLs list. A redirect URI must: Be an absolute URI. Be registered with the authorization server. Not include a fragment component. For more information, see Default redirect URI. Amazon Cognito requires HTTPS over HTTP except for http://localhost for testing purposes only. App callback URLs such as myapp://example are also supported. 
+ /// - callbackURLs: A list of allowed redirect (callback) URLs for the IdPs. A redirect URI must: Be an absolute URI. Be registered with the authorization server. Amazon Cognito doesn't accept authorization requests with redirect_uri values that aren't in the list of CallbackURLs that you provide in this parameter. Not include a fragment component. See OAuth 2.0 - Redirection Endpoint. Amazon Cognito requires HTTPS over HTTP except for http://localhost for testing purposes only. App callback URLs such as myapp://example are also supported. + /// - clientName: A friendly name for the app client that you want to create. + /// - defaultRedirectURI: The default redirect URI. In app clients with one assigned IdP, replaces redirect_uri in authentication requests. Must be in the CallbackURLs list. /// - enablePropagateAdditionalUserContextData: Activates the propagation of additional user context data. For more information about propagation of user context data, see Adding advanced security to a user pool. If you don’t include this parameter, you can't send device fingerprint information, including source IP address, to Amazon Cognito advanced security. You can only activate EnablePropagateAdditionalUserContextData in an app client that has a client secret. /// - enableTokenRevocation: Activates or deactivates token revocation. For more information about revoking tokens, see RevokeToken. If you don't include this parameter, token revocation is automatically activated for the new user pool client. /// - explicitAuthFlows: The authentication flows that you want your user pool client to support. For each app client in your user pool, you can sign in - /// - generateSecret: Boolean to specify whether you want to generate a secret for the user pool client being created. + /// - generateSecret: When true, generates a client secret for the app client. Client secrets are used with server-side and machine-to-machine applications. For more information, see App client types. 
/// - idTokenValidity: The ID token time limit. After this limit expires, your user can't use - /// - logoutURLs: A list of allowed logout URLs for the IdPs. + /// - logoutURLs: A list of allowed logout URLs for managed login authentication. For more information, see Logout endpoint. /// - preventUserExistenceErrors: Errors and responses that you want Amazon Cognito APIs to return during authentication, account confirmation, and password recovery when the user doesn't exist in the user pool. When set to ENABLED and the user doesn't exist, authentication returns an error indicating either the username or password was incorrect. Account confirmation and password recovery return a response indicating a code was sent to a simulated destination. When set to LEGACY, those APIs return a UserNotFoundException exception if the user doesn't exist in the user pool. Valid values include: ENABLED - This prevents user existence-related errors. LEGACY - This represents the early behavior of Amazon Cognito where user existence related errors aren't prevented. Defaults to LEGACY when you don't provide a value. /// - readAttributes: The list of user attributes that you want your app client to have read access to. After your user authenticates in your app, their access token authorizes them to read their own attribute value for any attribute in this list. An example of this kind of activity is when your user selects a link to view their profile information. Your app makes a GetUser API request to retrieve and display your user's profile data. When you don't specify the ReadAttributes for your app client, your app can read the values of email_verified, phone_number_verified, and the Standard attributes of your user pool. When your user pool app client has read access to these default attributes, ReadAttributes doesn't return any information. Amazon Cognito only populates ReadAttributes in the API response if you have specified your own custom set of read attributes. 
/// - refreshTokenValidity: The refresh token time limit. After this limit expires, your user can't use - /// - supportedIdentityProviders: A list of provider names for the identity providers (IdPs) that are supported on this client. The following are supported: COGNITO, Facebook, Google, SignInWithApple, and LoginWithAmazon. You can also specify the names that you configured for the SAML and OIDC IdPs in your user pool, for example MySAMLIdP or MyOIDCIdP. This setting applies to providers that you can access with the hosted UI and OAuth 2.0 authorization server. The removal of COGNITO from this list doesn't prevent authentication operations for local users with the user pools API in an Amazon Web Services SDK. The only way to prevent API-based authentication is to block access with a WAF rule. - /// - tokenValidityUnits: The units in which the validity times are represented. The default unit for RefreshToken is days, and default for ID and access tokens are hours. - /// - userPoolId: The user pool ID for the user pool where you want to create a user pool client. + /// - supportedIdentityProviders: A list of provider names for the identity providers (IdPs) that are supported on this client. The following are supported: COGNITO, Facebook, Google, SignInWithApple, and LoginWithAmazon. You can also specify the names that you configured for the SAML and OIDC IdPs in your user pool, for example MySAMLIdP or MyOIDCIdP. This setting applies to providers that you can access with managed login. The removal of COGNITO from this list doesn't prevent authentication operations for local users with the user pools API in an Amazon Web Services SDK. The only way to prevent API-based authentication is to block access with a WAF rule. + /// - tokenValidityUnits: The units that validity times are represented in. The default unit for refresh tokens is days, and the defaults for ID and access tokens are hours.
+ /// - userPoolId: The ID of the user pool where you want to create an app client. /// - writeAttributes: The list of user attributes that you want your app client to have write access to. After your user authenticates in your app, their access token authorizes them to set or modify their own attribute value for any attribute in this list. An example of this kind of activity is when you present your user with a form to update their profile information and they change their last name. Your app then makes an UpdateUserAttributes API request and sets family_name to the new value. When you don't specify the WriteAttributes for your app client, your app can write the values of the Standard attributes of your user pool. When your user pool has write access to these default attributes, WriteAttributes doesn't return any information. Amazon Cognito only populates WriteAttributes in the API response if you have specified your own custom set of write attributes. If your app client allows users to sign in through an IdP, this array must include all attributes that you have mapped to IdP attributes. Amazon Cognito updates mapped attributes when users sign in to your application through an IdP. If your app client does not have write access to a mapped attribute, Amazon Cognito throws an error when it tries to update the attribute. For more information, see Specifying IdP Attribute Mappings for Your user pool. /// - logger: Logger use during operation @inlinable @@ -1720,7 +1720,7 @@ public struct CognitoIdentityProvider: AWSService { return try await self.createUserPoolClient(input, logger: logger) } - /// Creates a new domain for a user pool. The domain hosts user pool domain services like managed login, the hosted UI (classic), and the user pool authorization server. Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. 
For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy. Learn more Signing Amazon Web Services API Requests Using the Amazon Cognito user pools API and user pool endpoints + /// A user pool domain hosts managed login, an authorization server and web server for authentication in your application. This operation creates a new user pool prefix or custom domain and sets the managed login branding version. Set the branding version to 1 for hosted UI (classic) or 2 for managed login. When you choose a custom domain, you must provide an SSL certificate in the US East (N. Virginia) Amazon Web Services Region in your request. Your prefix domain might take up to one minute to take effect. Your custom domain is online within five minutes, but it can take up to one hour to distribute your SSL certificate. For more information about adding a custom domain to your user pool, see Configuring a user pool domain. Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy. Learn more Signing Amazon Web Services API Requests Using the Amazon Cognito user pools API and user pool endpoints @Sendable @inlinable public func createUserPoolDomain(_ input: CreateUserPoolDomainRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateUserPoolDomainResponse { @@ -1733,12 +1733,12 @@ public struct CognitoIdentityProvider: AWSService { logger: logger ) } - /// Creates a new domain for a user pool. The domain hosts user pool domain services like managed login, the hosted UI (classic), and the user pool authorization server. Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. 
For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy. Learn more Signing Amazon Web Services API Requests Using the Amazon Cognito user pools API and user pool endpoints + /// A user pool domain hosts managed login, an authorization server and web server for authentication in your application. This operation creates a new user pool prefix or custom domain and sets the managed login branding version. Set the branding version to 1 for hosted UI (classic) or 2 for managed login. When you choose a custom domain, you must provide an SSL certificate in the US East (N. Virginia) Amazon Web Services Region in your request. Your prefix domain might take up to one minute to take effect. Your custom domain is online within five minutes, but it can take up to one hour to distribute your SSL certificate. For more information about adding a custom domain to your user pool, see Configuring a user pool domain. Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy. Learn more Signing Amazon Web Services API Requests Using the Amazon Cognito user pools API and user pool endpoints /// /// Parameters: - /// - customDomainConfig: The configuration for a custom domain that hosts the sign-up and sign-in webpages for your application. Provide this parameter only if you want to use a custom domain for your user pool. Otherwise, you can exclude this parameter and use the Amazon Cognito hosted domain instead. For more information about the hosted domain and custom domains, see Configuring a User Pool Domain. - /// - domain: The domain string. For custom domains, this is the fully-qualified domain name, such as auth.example.com. For Amazon Cognito prefix domains, this is the prefix alone, such as auth. 
- /// - managedLoginVersion: The version of managed login branding that you want to apply to your domain. A value of 1 indicates hosted UI (classic) branding and a version of 2 indicates managed login branding. Managed login requires that your user pool be configured for any feature plan other than Lite. + /// - customDomainConfig: The configuration for a custom domain. Configures your domain with a Certificate Manager certificate in the us-east-1 Region. Provide this parameter only if you want to use a custom domain for your user pool. Otherwise, you can exclude this parameter and use a prefix domain instead. For more information about the hosted domain and custom domains, see Configuring a User Pool Domain. + /// - domain: The domain string. For custom domains, this is the fully-qualified domain name, such as auth.example.com. For prefix domains, this is the prefix alone, such as myprefix. A prefix value of myprefix for a user pool in the us-east-1 Region results in a domain of myprefix.auth.us-east-1.amazoncognito.com. + /// - managedLoginVersion: The version of managed login branding that you want to apply to your domain. A value of 1 indicates hosted UI (classic) and a version of 2 indicates managed login. Managed login requires that your user pool be configured for any feature plan other than Lite. /// - userPoolId: The ID of the user pool where you want to add a domain. /// - logger: Logger use during operation @inlinable @@ -1758,7 +1758,7 @@ public struct CognitoIdentityProvider: AWSService { return try await self.createUserPoolDomain(input, logger: logger) } - /// Deletes a group. Calling this action requires developer credentials. + /// Deletes a group from the specified user pool. When you delete a group, that group no longer contributes to users' cognito:preferred_group or cognito:groups claims, and no longer influences access-control decisions that are based on group membership.
For more information about user pool groups, see Adding groups to a user pool. Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy. Learn more Signing Amazon Web Services API Requests Using the Amazon Cognito user pools API and user pool endpoints @Sendable @inlinable public func deleteGroup(_ input: DeleteGroupRequest, logger: Logger = AWSClient.loggingDisabled) async throws { @@ -1771,11 +1771,11 @@ public struct CognitoIdentityProvider: AWSService { logger: logger ) } - /// Deletes a group. Calling this action requires developer credentials. + /// Deletes a group from the specified user pool. When you delete a group, that group no longer contributes to users' cognito:preferred_group or cognito:groups claims, and no longer influences access-control decisions that are based on group membership. For more information about user pool groups, see Adding groups to a user pool. Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy. Learn more Signing Amazon Web Services API Requests Using the Amazon Cognito user pools API and user pool endpoints /// /// Parameters: - /// - groupName: The name of the group. - /// - userPoolId: The user pool ID for the user pool. + /// - groupName: The name of the group that you want to delete. + /// - userPoolId: The ID of the user pool where you want to delete the group. /// - logger: Logger use during operation @inlinable public func deleteGroup( @@ -1790,7 +1790,7 @@ public struct CognitoIdentityProvider: AWSService { return try await self.deleteGroup(input, logger: logger) } - /// Deletes an IdP for a user pool.
+ /// Deletes a user pool identity provider (IdP). After you delete an IdP, users can no longer sign in to your user pool through that IdP. For more information about user pool IdPs, see Third-party IdP sign-in. Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy. Learn more Signing Amazon Web Services API Requests Using the Amazon Cognito user pools API and user pool endpoints @Sendable @inlinable public func deleteIdentityProvider(_ input: DeleteIdentityProviderRequest, logger: Logger = AWSClient.loggingDisabled) async throws { @@ -1803,11 +1803,11 @@ public struct CognitoIdentityProvider: AWSService { logger: logger ) } - /// Deletes an IdP for a user pool. + /// Deletes a user pool identity provider (IdP). After you delete an IdP, users can no longer sign in to your user pool through that IdP. For more information about user pool IdPs, see Third-party IdP sign-in. Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy. Learn more Signing Amazon Web Services API Requests Using the Amazon Cognito user pools API and user pool endpoints /// /// Parameters: - /// - providerName: The IdP name. - /// - userPoolId: The user pool ID. + /// - providerName: The name of the IdP that you want to delete. + /// - userPoolId: The ID of the user pool where you want to delete the identity provider. /// - logger: Logger use during operation @inlinable public func deleteIdentityProvider( @@ -1822,7 +1822,7 @@ public struct CognitoIdentityProvider: AWSService { return try await self.deleteIdentityProvider(input, logger: logger) } - /// Deletes a managed login branding style. 
When you delete a style, you delete the branding association for an app client and restore it to default settings. Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy. Learn more Signing Amazon Web Services API Requests Using the Amazon Cognito user pools API and user pool endpoints + /// Deletes a managed login branding style. When you delete a style, you delete the branding association for an app client. When an app client doesn't have a style assigned, your managed login pages for that app client are nonfunctional until you create a new style or switch the domain branding version. Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy. Learn more Signing Amazon Web Services API Requests Using the Amazon Cognito user pools API and user pool endpoints @Sendable @inlinable public func deleteManagedLoginBranding(_ input: DeleteManagedLoginBrandingRequest, logger: Logger = AWSClient.loggingDisabled) async throws { @@ -1835,7 +1835,7 @@ public struct CognitoIdentityProvider: AWSService { logger: logger ) } - /// Deletes a managed login branding style. When you delete a style, you delete the branding association for an app client and restore it to default settings. Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy. Learn more Signing Amazon Web Services API Requests Using the Amazon Cognito user pools API and user pool endpoints + /// Deletes a managed login branding style. 
When you delete a style, you delete the branding association for an app client. When an app client doesn't have a style assigned, your managed login pages for that app client are nonfunctional until you create a new style or switch the domain branding version. Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy. Learn more Signing Amazon Web Services API Requests Using the Amazon Cognito user pools API and user pool endpoints /// /// Parameters: /// - managedLoginBrandingId: The ID of the managed login branding style that you want to delete. @@ -1854,7 +1854,7 @@ public struct CognitoIdentityProvider: AWSService { return try await self.deleteManagedLoginBranding(input, logger: logger) } - /// Deletes a resource server. + /// Deletes a resource server. After you delete a resource server, users can no longer generate access tokens with scopes that are associated with that resource server. Resource servers are associated with custom scopes and machine-to-machine (M2M) authorization. For more information, see Access control with resource servers. Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy. Learn more Signing Amazon Web Services API Requests Using the Amazon Cognito user pools API and user pool endpoints @Sendable @inlinable public func deleteResourceServer(_ input: DeleteResourceServerRequest, logger: Logger = AWSClient.loggingDisabled) async throws { @@ -1867,11 +1867,11 @@ public struct CognitoIdentityProvider: AWSService { logger: logger ) } - /// Deletes a resource server.
After you delete a resource server, users can no longer generate access tokens with scopes that are associated with that resource server. Resource servers are associated with custom scopes and machine-to-machine (M2M) authorization. For more information, see Access control with resource servers. Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy. Learn more Signing Amazon Web Services API Requests Using the Amazon Cognito user pools API and user pool endpoints /// /// Parameters: - /// - identifier: The identifier for the resource server. - /// - userPoolId: The user pool ID for the user pool that hosts the resource server. + /// - identifier: The identifier of the resource server that you want to delete. + /// - userPoolId: The ID of the user pool where you want to delete the resource server. /// - logger: Logger use during operation @inlinable public func deleteResourceServer( @@ -1886,7 +1886,7 @@ public struct CognitoIdentityProvider: AWSService { return try await self.deleteResourceServer(input, logger: logger) } - /// Allows a user to delete their own user profile. Authorize this action with a signed-in user's access token. It must include the scope aws.cognito.signin.user.admin. Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints. + /// Self-deletes a user profile. A deleted user profile can no longer be used to sign in and can't be restored. Authorize this action with a signed-in user's access token.
It must include the scope aws.cognito.signin.user.admin. Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints. @Sendable @inlinable public func deleteUser(_ input: DeleteUserRequest, logger: Logger = AWSClient.loggingDisabled) async throws { @@ -1899,7 +1899,7 @@ public struct CognitoIdentityProvider: AWSService { logger: logger ) } - /// Allows a user to delete their own user profile. Authorize this action with a signed-in user's access token. It must include the scope aws.cognito.signin.user.admin. Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints. + /// Self-deletes a user profile. A deleted user profile can no longer be used to sign in and can't be restored. Authorize this action with a signed-in user's access token. It must include the scope aws.cognito.signin.user.admin. Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints. /// /// Parameters: /// - accessToken: A valid access token that Amazon Cognito issued to the user whose user profile you want to delete. 
@@ -1915,7 +1915,7 @@ public struct CognitoIdentityProvider: AWSService { return try await self.deleteUser(input, logger: logger) } - /// Deletes the attributes for a user. Authorize this action with a signed-in user's access token. It must include the scope aws.cognito.signin.user.admin. Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints. + /// Self-deletes attributes for a user. For example, your application can submit a request to this operation when a user wants to remove their birthdate attribute value. Authorize this action with a signed-in user's access token. It must include the scope aws.cognito.signin.user.admin. Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints. @Sendable @inlinable public func deleteUserAttributes(_ input: DeleteUserAttributesRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DeleteUserAttributesResponse { @@ -1928,11 +1928,11 @@ public struct CognitoIdentityProvider: AWSService { logger: logger ) } - /// Deletes the attributes for a user. Authorize this action with a signed-in user's access token. It must include the scope aws.cognito.signin.user.admin. Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. 
For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints. + /// Self-deletes attributes for a user. For example, your application can submit a request to this operation when a user wants to remove their birthdate attribute value. Authorize this action with a signed-in user's access token. It must include the scope aws.cognito.signin.user.admin. Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints. /// /// Parameters: /// - accessToken: A valid access token that Amazon Cognito issued to the user whose attributes you want to delete. - /// - userAttributeNames: An array of strings representing the user attribute names you want to delete. For custom attributes, you must prependattach the custom: prefix to the front of the attribute name. + /// - userAttributeNames: An array of strings representing the user attribute names you want to delete. For custom attributes, you must prepend the custom: prefix to the attribute name, for example custom:department. /// - logger: Logger use during operation @inlinable public func deleteUserAttributes( @@ -1947,7 +1947,7 @@ public struct CognitoIdentityProvider: AWSService { return try await self.deleteUserAttributes(input, logger: logger) } - /// Deletes the specified Amazon Cognito user pool. + /// Deletes a user pool. After you delete a user pool, users can no longer sign in to any associated applications. 
@Sendable @inlinable public func deleteUserPool(_ input: DeleteUserPoolRequest, logger: Logger = AWSClient.loggingDisabled) async throws { @@ -1960,10 +1960,10 @@ public struct CognitoIdentityProvider: AWSService { logger: logger ) } - /// Deletes the specified Amazon Cognito user pool. + /// Deletes a user pool. After you delete a user pool, users can no longer sign in to any associated applications. /// /// Parameters: - /// - userPoolId: The user pool ID for the user pool you want to delete. + /// - userPoolId: The ID of the user pool that you want to delete. /// - logger: Logger use during operation @inlinable public func deleteUserPool( @@ -1976,7 +1976,7 @@ public struct CognitoIdentityProvider: AWSService { return try await self.deleteUserPool(input, logger: logger) } - /// Allows the developer to delete the user pool client. + /// Deletes a user pool app client. After you delete an app client, users can no longer sign in to the associated application. @Sendable @inlinable public func deleteUserPoolClient(_ input: DeleteUserPoolClientRequest, logger: Logger = AWSClient.loggingDisabled) async throws { @@ -1989,11 +1989,11 @@ public struct CognitoIdentityProvider: AWSService { logger: logger ) } - /// Allows the developer to delete the user pool client. + /// Deletes a user pool app client. After you delete an app client, users can no longer sign in to the associated application. /// /// Parameters: - /// - clientId: The app client ID of the app associated with the user pool. - /// - userPoolId: The user pool ID for the user pool where you want to delete the client. + /// - clientId: The ID of the user pool app client that you want to delete. + /// - userPoolId: The ID of the user pool where you want to delete the client. 
/// - logger: Logger use during operation @inlinable public func deleteUserPoolClient( @@ -2008,7 +2008,7 @@ public struct CognitoIdentityProvider: AWSService { return try await self.deleteUserPoolClient(input, logger: logger) } - /// Deletes a domain for a user pool. + /// Given a user pool ID and domain identifier, deletes a user pool domain. After you delete a user pool domain, your managed login pages and authorization server are no longer available. @Sendable @inlinable public func deleteUserPoolDomain(_ input: DeleteUserPoolDomainRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DeleteUserPoolDomainResponse { @@ -2021,11 +2021,11 @@ public struct CognitoIdentityProvider: AWSService { logger: logger ) } - /// Deletes a domain for a user pool. + /// Given a user pool ID and domain identifier, deletes a user pool domain. After you delete a user pool domain, your managed login pages and authorization server are no longer available. /// /// Parameters: - /// - domain: The domain string. For custom domains, this is the fully-qualified domain name, such as auth.example.com. For Amazon Cognito prefix domains, this is the prefix alone, such as auth. - /// - userPoolId: The user pool ID. + /// - domain: The domain that you want to delete. For custom domains, this is the fully-qualified domain name, such as auth.example.com. For Amazon Cognito prefix domains, this is the prefix alone, such as auth. + /// - userPoolId: The ID of the user pool where you want to delete the domain. /// - logger: Logger use during operation @inlinable public func deleteUserPoolDomain( @@ -2040,7 +2040,7 @@ public struct CognitoIdentityProvider: AWSService { return try await self.deleteUserPoolDomain(input, logger: logger) } - /// Deletes a registered passkey, or webauthN, device for the currently signed-in user. Authorize this action with a signed-in user's access token. It must include the scope aws.cognito.signin.user.admin. 
+ /// Deletes a registered passkey, or webauthN, authenticator for the currently signed-in user. Authorize this action with a signed-in user's access token. It must include the scope aws.cognito.signin.user.admin. Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints. @Sendable @inlinable public func deleteWebAuthnCredential(_ input: DeleteWebAuthnCredentialRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DeleteWebAuthnCredentialResponse { @@ -2053,11 +2053,11 @@ public struct CognitoIdentityProvider: AWSService { logger: logger ) } - /// Deletes a registered passkey, or webauthN, device for the currently signed-in user. Authorize this action with a signed-in user's access token. It must include the scope aws.cognito.signin.user.admin. + /// Deletes a registered passkey, or webauthN, authenticator for the currently signed-in user. Authorize this action with a signed-in user's access token. It must include the scope aws.cognito.signin.user.admin. Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints. /// /// Parameters: - /// - accessToken: A valid access token that Amazon Cognito issued to the user whose passkey you want to delete. - /// - credentialId: The unique identifier of the passkey that you want to delete. Look up registered devices with ListWebAuthnCredentials. 
+ /// - accessToken: A valid access token that Amazon Cognito issued to the user whose passkey credential you want to delete. + /// - credentialId: The unique identifier of the passkey that you want to delete. Look up registered devices with ListWebAuthnCredentials. /// - logger: Logger use during operation @inlinable public func deleteWebAuthnCredential( @@ -2072,7 +2072,7 @@ public struct CognitoIdentityProvider: AWSService { return try await self.deleteWebAuthnCredential(input, logger: logger) } - /// Gets information about a specific IdP. + /// Given a user pool ID and identity provider (IdP) name, returns details about the IdP. @Sendable @inlinable public func describeIdentityProvider(_ input: DescribeIdentityProviderRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DescribeIdentityProviderResponse { @@ -2085,11 +2085,11 @@ public struct CognitoIdentityProvider: AWSService { logger: logger ) } - /// Gets information about a specific IdP. + /// Given a user pool ID and identity provider (IdP) name, returns details about the IdP. /// /// Parameters: - /// - providerName: The IdP name. - /// - userPoolId: The user pool ID. + /// - providerName: The name of the IdP that you want to describe. + /// - userPoolId: The ID of the user pool that has the IdP that you want to describe. /// - logger: Logger use during operation @inlinable public func describeIdentityProvider( @@ -2104,7 +2104,7 @@ public struct CognitoIdentityProvider: AWSService { return try await self.describeIdentityProvider(input, logger: logger) } - /// When given the ID of a managed login branding style, returns detailed information about the style. + /// Given the ID of a managed login branding style, returns detailed information about the style.
@Sendable @inlinable public func describeManagedLoginBranding(_ input: DescribeManagedLoginBrandingRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DescribeManagedLoginBrandingResponse { @@ -2117,7 +2117,7 @@ public struct CognitoIdentityProvider: AWSService { logger: logger ) } - /// When given the ID of a managed login branding style, returns detailed information about the style. + /// Given the ID of a managed login branding style, returns detailed information about the style. /// /// Parameters: /// - managedLoginBrandingId: The ID of the managed login branding style that you want to get more information about. @@ -2139,7 +2139,7 @@ public struct CognitoIdentityProvider: AWSService { return try await self.describeManagedLoginBranding(input, logger: logger) } - /// When given the ID of a user pool app client, returns detailed information about the style assigned to the app client. + /// Given the ID of a user pool app client, returns detailed information about the style assigned to the app client. @Sendable @inlinable public func describeManagedLoginBrandingByClient(_ input: DescribeManagedLoginBrandingByClientRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DescribeManagedLoginBrandingByClientResponse { @@ -2152,7 +2152,7 @@ public struct CognitoIdentityProvider: AWSService { logger: logger ) } - /// When given the ID of a user pool app client, returns detailed information about the style assigned to the app client. + /// Given the ID of a user pool app client, returns detailed information about the style assigned to the app client. /// /// Parameters: /// - clientId: The app client that's assigned to the branding style that you want more information about. @@ -2174,7 +2174,7 @@ public struct CognitoIdentityProvider: AWSService { return try await self.describeManagedLoginBrandingByClient(input, logger: logger) } - /// Describes a resource server. + /// Describes a resource server. 
For more information about resource servers, see Access control with resource servers. @Sendable @inlinable public func describeResourceServer(_ input: DescribeResourceServerRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DescribeResourceServerResponse { @@ -2187,11 +2187,11 @@ public struct CognitoIdentityProvider: AWSService { logger: logger ) } - /// Describes a resource server. + /// Describes a resource server. For more information about resource servers, see Access control with resource servers. /// /// Parameters: /// - identifier: A unique resource server identifier for the resource server. The identifier can be an API friendly name like solar-system-data. You can also set an API URL like https://solar-system-data-api.example.com as your identifier. Amazon Cognito represents scopes in the access token in the format $resource-server-identifier/$scope. Longer scope-identifier strings increase the size of your access tokens. - /// - userPoolId: The user pool ID for the user pool that hosts the resource server. + /// - userPoolId: The ID of the user pool that hosts the resource server. /// - logger: Logger use during operation @inlinable public func describeResourceServer( @@ -2206,7 +2206,7 @@ public struct CognitoIdentityProvider: AWSService { return try await self.describeResourceServer(input, logger: logger) } - /// Describes the risk configuration. + /// Given an app client or user pool ID where threat protection is configured, describes the risk configuration. This operation returns details about adaptive authentication, compromised credentials, and IP-address allow- and denylists. For more information about threat protection, see Threat protection. 
@Sendable @inlinable public func describeRiskConfiguration(_ input: DescribeRiskConfigurationRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DescribeRiskConfigurationResponse { @@ -2219,11 +2219,11 @@ public struct CognitoIdentityProvider: AWSService { logger: logger ) } - /// Describes the risk configuration. + /// Given an app client or user pool ID where threat protection is configured, describes the risk configuration. This operation returns details about adaptive authentication, compromised credentials, and IP-address allow- and denylists. For more information about threat protection, see Threat protection. /// /// Parameters: - /// - clientId: The app client ID. - /// - userPoolId: The user pool ID. + /// - clientId: The ID of the app client with the risk configuration that you want to inspect. You can apply default risk configuration at the user pool level and further customize it from user pool defaults at the app-client level. Specify ClientId to inspect client-level configuration, or UserPoolId to inspect pool-level configuration. + /// - userPoolId: The ID of the user pool with the risk configuration that you want to inspect. You can apply default risk configuration at the user pool level and further customize it from user pool defaults at the app-client level. Specify ClientId to inspect client-level configuration, or UserPoolId to inspect pool-level configuration. /// - logger: Logger use during operation @inlinable public func describeRiskConfiguration( @@ -2238,7 +2238,7 @@ public struct CognitoIdentityProvider: AWSService { return try await self.describeRiskConfiguration(input, logger: logger) } - /// Describes the user import job. + /// Describes a user import job. For more information about user CSV import, see Importing users from a CSV file. 
@Sendable @inlinable public func describeUserImportJob(_ input: DescribeUserImportJobRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DescribeUserImportJobResponse { @@ -2251,11 +2251,11 @@ public struct CognitoIdentityProvider: AWSService { logger: logger ) } - /// Describes the user import job. + /// Describes a user import job. For more information about user CSV import, see Importing users from a CSV file. /// /// Parameters: - /// - jobId: The job ID for the user import job. - /// - userPoolId: The user pool ID for the user pool that the users are being imported into. + /// - jobId: The ID of the user import job that you want to describe. + /// - userPoolId: The ID of the user pool that's associated with the import job. /// - logger: Logger use during operation @inlinable public func describeUserImportJob( @@ -2270,7 +2270,7 @@ public struct CognitoIdentityProvider: AWSService { return try await self.describeUserImportJob(input, logger: logger) } - /// Returns the configuration information and metadata of the specified user pool. Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy. Learn more Signing Amazon Web Services API Requests Using the Amazon Cognito user pools API and user pool endpoints + /// Given a user pool ID, returns configuration information. This operation is useful when you want to inspect an existing user pool and programmatically replicate the configuration to another user pool. Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy.
Learn more Signing Amazon Web Services API Requests Using the Amazon Cognito user pools API and user pool endpoints @Sendable @inlinable public func describeUserPool(_ input: DescribeUserPoolRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DescribeUserPoolResponse { @@ -2283,10 +2283,10 @@ public struct CognitoIdentityProvider: AWSService { logger: logger ) } - /// Returns the configuration information and metadata of the specified user pool. Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy. Learn more Signing Amazon Web Services API Requests Using the Amazon Cognito user pools API and user pool endpoints + /// Given a user pool ID, returns configuration information. This operation is useful when you want to inspect an existing user pool and programmatically replicate the configuration to another user pool. Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy. Learn more Signing Amazon Web Services API Requests Using the Amazon Cognito user pools API and user pool endpoints /// /// Parameters: - /// - userPoolId: The user pool ID for the user pool you want to describe. + /// - userPoolId: The ID of the user pool you want to describe. /// - logger: Logger use during operation @inlinable public func describeUserPool( @@ -2299,7 +2299,7 @@ public struct CognitoIdentityProvider: AWSService { return try await self.describeUserPool(input, logger: logger) } - /// Client method for returning the configuration information and metadata of the specified user pool app client. 
Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy. Learn more Signing Amazon Web Services API Requests Using the Amazon Cognito user pools API and user pool endpoints + /// Given an app client ID, returns configuration information. This operation is useful when you want to inspect an existing app client and programmatically replicate the configuration to another app client. For more information about app clients, see App clients. Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy. Learn more Signing Amazon Web Services API Requests Using the Amazon Cognito user pools API and user pool endpoints @Sendable @inlinable public func describeUserPoolClient(_ input: DescribeUserPoolClientRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DescribeUserPoolClientResponse { @@ -2312,11 +2312,11 @@ public struct CognitoIdentityProvider: AWSService { logger: logger ) } - /// Client method for returning the configuration information and metadata of the specified user pool app client. Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy. Learn more Signing Amazon Web Services API Requests Using the Amazon Cognito user pools API and user pool endpoints + /// Given an app client ID, returns configuration information. This operation is useful when you want to inspect an existing app client and programmatically replicate the configuration to another app client. 
For more information about app clients, see App clients. Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy. Learn more Signing Amazon Web Services API Requests Using the Amazon Cognito user pools API and user pool endpoints /// /// Parameters: - /// - clientId: The app client ID of the app associated with the user pool. - /// - userPoolId: The user pool ID for the user pool you want to describe. + /// - clientId: The ID of the app client that you want to describe. + /// - userPoolId: The ID of the user pool that contains the app client you want to describe. /// - logger: Logger use during operation @inlinable public func describeUserPoolClient( @@ -2331,7 +2331,7 @@ public struct CognitoIdentityProvider: AWSService { return try await self.describeUserPoolClient(input, logger: logger) } - /// Gets information about a domain. + /// Given a user pool domain name, returns information about the domain configuration. Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy. Learn more Signing Amazon Web Services API Requests Using the Amazon Cognito user pools API and user pool endpoints @Sendable @inlinable public func describeUserPoolDomain(_ input: DescribeUserPoolDomainRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DescribeUserPoolDomainResponse { @@ -2344,10 +2344,10 @@ public struct CognitoIdentityProvider: AWSService { logger: logger ) } - /// Gets information about a domain. + /// Given a user pool domain name, returns information about the domain configuration. 
Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy. Learn more Signing Amazon Web Services API Requests Using the Amazon Cognito user pools API and user pool endpoints /// /// Parameters: - /// - domain: The domain string. For custom domains, this is the fully-qualified domain name, such as auth.example.com. For Amazon Cognito prefix domains, this is the prefix alone, such as auth. + /// - domain: The domain that you want to describe. For custom domains, this is the fully-qualified domain name, such as auth.example.com. For Amazon Cognito prefix domains, this is the prefix alone, such as auth. /// - logger: Logger use during operation @inlinable public func describeUserPoolDomain( @@ -2411,7 +2411,7 @@ public struct CognitoIdentityProvider: AWSService { /// - analyticsMetadata: The Amazon Pinpoint analytics metadata that contributes to your metrics for ForgotPassword calls. /// - clientId: The ID of the client associated with the user pool. /// - clientMetadata: A map of custom key-value pairs that you can provide as input for any custom workflows that this action triggers. You create custom workflows by assigning Lambda functions to user pool triggers. When you use the ForgotPassword API action, Amazon Cognito invokes any functions that are assigned to the following triggers: pre sign-up, custom message, and user migration. When Amazon Cognito invokes any of these functions, it passes a JSON payload, which the function receives as input. This payload contains a clientMetadata attribute, which provides the data that you assigned to the ClientMetadata parameter in your ForgotPassword request. In your function code in Lambda, you can process the clientMetadata value to enhance your workflow for your specific needs. 
For more information, see - /// - secretHash: A keyed-hash message authentication code (HMAC) calculated using the secret key of a user pool client and username plus the client ID in the message. + /// - secretHash: A keyed-hash message authentication code (HMAC) calculated using the secret key of a user pool client and username plus the client ID in the message. For more information about SecretHash, see Computing secret hash values. /// - userContextData: Contextual data about your user session, such as the device fingerprint, IP address, or location. Amazon Cognito advanced /// - username: The username of the user that you want to query or modify. The value of this parameter is typically your user's username, but it can be any of their alias attributes. If username isn't an alias attribute in your user pool, this value must be the sub of a local user or the username of a user from a third-party IdP. /// - logger: Logger use during operation @@ -2452,7 +2452,7 @@ public struct CognitoIdentityProvider: AWSService { /// Gets the header information for the comma-separated value (CSV) file to be used as input for the user import job. /// /// Parameters: - /// - userPoolId: The user pool ID for the user pool that the users are to be imported into. + /// - userPoolId: The ID of the user pool that the users are to be imported into. /// - logger: Logger use during operation @inlinable public func getCSVHeader( @@ -2514,7 +2514,7 @@ public struct CognitoIdentityProvider: AWSService { /// /// Parameters: /// - groupName: The name of the group. - /// - userPoolId: The user pool ID for the user pool. + /// - userPoolId: The ID of the user pool. /// - logger: Logger use during operation @inlinable public func getGroup( @@ -2636,7 +2636,7 @@ public struct CognitoIdentityProvider: AWSService { /// /// Parameters: /// - clientId: The client ID for the client app. - /// - userPoolId: The user pool ID for the user pool. + /// - userPoolId: The ID of the user pool. 
/// - logger: Logger use during operation @inlinable public func getUICustomization( @@ -2773,7 +2773,7 @@ public struct CognitoIdentityProvider: AWSService { return try await self.getUserPoolMfaConfig(input, logger: logger) } - /// Invalidates the identity, access, and refresh tokens that Amazon Cognito issued to a user. Call this operation when your user signs out of your app. This results in the following behavior. Amazon Cognito no longer accepts token-authorized user operations that you authorize with a signed-out user's access tokens. For more information, see Using the Amazon Cognito user pools API and user pool endpoints. Amazon Cognito returns an Access Token has been revoked error when your app attempts to authorize a user pools API request with a revoked access token that contains the scope aws.cognito.signin.user.admin. Amazon Cognito no longer accepts a signed-out user's ID token in a GetId request to an identity pool with ServerSideTokenCheck enabled for its user pool IdP configuration in CognitoIdentityProvider. Amazon Cognito no longer accepts a signed-out user's refresh tokens in refresh requests. Other requests might be valid until your user's token expires. Authorize this action with a signed-in user's access token. It must include the scope aws.cognito.signin.user.admin. Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints. + /// Invalidates the identity, access, and refresh tokens that Amazon Cognito issued to a user. Call this operation when your user signs out of your app. This results in the following behavior. 
Amazon Cognito no longer accepts token-authorized user operations that you authorize with a signed-out user's access tokens. For more information, see Using the Amazon Cognito user pools API and user pool endpoints. Amazon Cognito returns an Access Token has been revoked error when your app attempts to authorize a user pools API request with a revoked access token that contains the scope aws.cognito.signin.user.admin. Amazon Cognito no longer accepts a signed-out user's ID token in a GetId request to an identity pool with ServerSideTokenCheck enabled for its user pool IdP configuration in CognitoIdentityProvider. Amazon Cognito no longer accepts a signed-out user's refresh tokens in refresh requests. Other requests might be valid until your user's token expires. This operation doesn't clear the managed login session cookie. To clear the session for a user who signed in with managed login or the classic hosted UI, direct their browser session to the logout endpoint. Authorize this action with a signed-in user's access token. It must include the scope aws.cognito.signin.user.admin. Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints. @Sendable @inlinable public func globalSignOut(_ input: GlobalSignOutRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> GlobalSignOutResponse { @@ -2786,7 +2786,7 @@ public struct CognitoIdentityProvider: AWSService { logger: logger ) } - /// Invalidates the identity, access, and refresh tokens that Amazon Cognito issued to a user. Call this operation when your user signs out of your app. This results in the following behavior. 
Amazon Cognito no longer accepts token-authorized user operations that you authorize with a signed-out user's access tokens. For more information, see Using the Amazon Cognito user pools API and user pool endpoints. Amazon Cognito returns an Access Token has been revoked error when your app attempts to authorize a user pools API request with a revoked access token that contains the scope aws.cognito.signin.user.admin. Amazon Cognito no longer accepts a signed-out user's ID token in a GetId request to an identity pool with ServerSideTokenCheck enabled for its user pool IdP configuration in CognitoIdentityProvider. Amazon Cognito no longer accepts a signed-out user's refresh tokens in refresh requests. Other requests might be valid until your user's token expires. Authorize this action with a signed-in user's access token. It must include the scope aws.cognito.signin.user.admin. Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints. + /// Invalidates the identity, access, and refresh tokens that Amazon Cognito issued to a user. Call this operation when your user signs out of your app. This results in the following behavior. Amazon Cognito no longer accepts token-authorized user operations that you authorize with a signed-out user's access tokens. For more information, see Using the Amazon Cognito user pools API and user pool endpoints. Amazon Cognito returns an Access Token has been revoked error when your app attempts to authorize a user pools API request with a revoked access token that contains the scope aws.cognito.signin.user.admin. 
Amazon Cognito no longer accepts a signed-out user's ID token in a GetId request to an identity pool with ServerSideTokenCheck enabled for its user pool IdP configuration in CognitoIdentityProvider. Amazon Cognito no longer accepts a signed-out user's refresh tokens in refresh requests. Other requests might be valid until your user's token expires. This operation doesn't clear the managed login session cookie. To clear the session for a user who signed in with managed login or the classic hosted UI, direct their browser session to the logout endpoint. Authorize this action with a signed-in user's access token. It must include the scope aws.cognito.signin.user.admin. Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints. /// /// Parameters: /// - accessToken: A valid access token that Amazon Cognito issued to the user who you want to sign out. @@ -2819,7 +2819,7 @@ public struct CognitoIdentityProvider: AWSService { /// /// Parameters: /// - analyticsMetadata: The Amazon Pinpoint analytics metadata that contributes to your metrics for InitiateAuth calls. - /// - authFlow: The authentication flow that you want to initiate. The AuthParameters that you must submit are linked to the flow that you submit. For example: USER_AUTH: Request a preferred authentication type or review available authentication types. From the offered authentication types, select one in a challenge response and then authenticate with that method in an additional challenge response. REFRESH_TOKEN_AUTH: Receive new ID and access tokens when you pass a REFRESH_TOKEN parameter with a valid refresh token as the value. 
USER_SRP_AUTH: Receive secure remote password (SRP) variables for the next challenge, PASSWORD_VERIFIER, when you pass USERNAME and SRP_A parameters. USER_PASSWORD_AUTH: Receive new tokens or the next challenge, for example SOFTWARE_TOKEN_MFA, when you pass USERNAME and PASSWORD parameters. Valid values include the following: USER_AUTH The entry point for sign-in with passwords, one-time passwords, biometric devices, and security keys. USER_SRP_AUTH Username-password authentication with the Secure Remote Password (SRP) protocol. For more information, see Use SRP password verification in custom authentication flow. REFRESH_TOKEN_AUTH and REFRESH_TOKEN Provide a valid refresh token and receive new ID and access tokens. For more information, see Using the refresh token. CUSTOM_AUTH Custom authentication with Lambda triggers. For more information, see Custom authentication challenge Lambda triggers. USER_PASSWORD_AUTH Username-password authentication with the password sent directly in the request. For more information, see Admin authentication flow. ADMIN_USER_PASSWORD_AUTH is a flow type of AdminInitiateAuth and isn't valid for InitiateAuth. ADMIN_NO_SRP_AUTH is a legacy server-side username-password flow and isn't valid for InitiateAuth. + /// - authFlow: The authentication flow that you want to initiate. Each AuthFlow has linked AuthParameters that you must submit. The following are some example flows and their parameters. USER_AUTH: Request a preferred authentication type or review available authentication types. From the offered authentication types, select one in a challenge response and then authenticate with that method in an additional challenge response. REFRESH_TOKEN_AUTH: Receive new ID and access tokens when you pass a REFRESH_TOKEN parameter with a valid refresh token as the value. USER_SRP_AUTH: Receive secure remote password (SRP) variables for the next challenge, PASSWORD_VERIFIER, when you pass USERNAME and SRP_A parameters. 
USER_PASSWORD_AUTH: Receive new tokens or the next challenge, for example SOFTWARE_TOKEN_MFA, when you pass USERNAME and PASSWORD parameters. All flows USER_AUTH The entry point for sign-in with passwords, one-time passwords, and WebAuthN authenticators. USER_SRP_AUTH Username-password authentication with the Secure Remote Password (SRP) protocol. For more information, see Use SRP password verification in custom authentication flow. REFRESH_TOKEN_AUTH and REFRESH_TOKEN Provide a valid refresh token and receive new ID and access tokens. For more information, see Using the refresh token. CUSTOM_AUTH Custom authentication with Lambda triggers. For more information, see Custom authentication challenge Lambda triggers. USER_PASSWORD_AUTH Username-password authentication with the password sent directly in the request. For more information, see Admin authentication flow. ADMIN_USER_PASSWORD_AUTH is a flow type of AdminInitiateAuth and isn't valid for InitiateAuth. ADMIN_NO_SRP_AUTH is a legacy server-side username-password flow and isn't valid for InitiateAuth. /// - authParameters: The authentication parameters. These are inputs corresponding to the AuthFlow that you're invoking. The required values depend on the value of AuthFlow: For USER_AUTH: USERNAME (required), PREFERRED_CHALLENGE. If you don't provide a value for PREFERRED_CHALLENGE, Amazon Cognito responds with the AvailableChallenges parameter that specifies the available sign-in methods. For USER_SRP_AUTH: USERNAME (required), SRP_A (required), SECRET_HASH (required if the app client is configured with a client secret), DEVICE_KEY. For USER_PASSWORD_AUTH: USERNAME (required), PASSWORD (required), SECRET_HASH (required if the app client is configured with a client secret), DEVICE_KEY. For REFRESH_TOKEN_AUTH/REFRESH_TOKEN: REFRESH_TOKEN (required), SECRET_HASH (required if the app client is configured with a client secret), DEVICE_KEY. 
For CUSTOM_AUTH: USERNAME (required), SECRET_HASH (if app client is configured with client secret), DEVICE_KEY. To start the authentication flow with password verification, include ChallengeName: SRP_A and SRP_A: (The SRP_A Value). For more information about SECRET_HASH, see Computing secret hash values. For information about DEVICE_KEY, see Working with user devices in your user pool. /// - clientId: The app client ID. /// - clientMetadata: A map of custom key-value pairs that you can provide as input for certain custom workflows that this action triggers. You create custom workflows by assigning Lambda functions to user pool triggers. When you use the InitiateAuth API action, Amazon Cognito invokes the Lambda functions that are specified for various triggers. The ClientMetadata value is passed as input to the functions for only the following triggers: Pre signup Pre authentication User migration When Amazon Cognito invokes the functions for these triggers, it passes a JSON payload, which the function receives as input. This payload contains a validationData attribute, which provides the data that you assigned to the ClientMetadata parameter in your InitiateAuth request. In your function code in Lambda, you can process the validationData value to enhance your workflow for your specific needs. When you use the InitiateAuth API action, Amazon Cognito also invokes the functions for the following triggers, but it doesn't provide the ClientMetadata value as input: Post authentication Custom message Pre token generation Create auth challenge Define auth challenge Custom email sender Custom SMS sender For more information, see @@ -2902,7 +2902,7 @@ public struct CognitoIdentityProvider: AWSService { /// Parameters: /// - limit: The limit of the request to list groups. /// - nextToken: An identifier that was returned from the previous call to this operation, which can be used to return the next set of items in the list. 
- /// - userPoolId: The user pool ID for the user pool. + /// - userPoolId: The ID of the user pool. /// - logger: Logger use during operation @inlinable public func listGroups( @@ -2972,7 +2972,7 @@ public struct CognitoIdentityProvider: AWSService { /// Parameters: /// - maxResults: The maximum number of resource servers to return. /// - nextToken: A pagination token. - /// - userPoolId: The user pool ID for the user pool. + /// - userPoolId: The ID of the user pool. /// - logger: Logger use during operation @inlinable public func listResourceServers( @@ -3036,7 +3036,7 @@ public struct CognitoIdentityProvider: AWSService { /// Parameters: /// - maxResults: The maximum number of import jobs you want the request to return. /// - paginationToken: This API operation returns a limited number of results. The pagination token is - /// - userPoolId: The user pool ID for the user pool that the users are being imported into. + /// - userPoolId: The ID of the user pool that the users are being imported into. /// - logger: Logger use during operation @inlinable public func listUserImportJobs( @@ -3071,7 +3071,7 @@ public struct CognitoIdentityProvider: AWSService { /// Parameters: /// - maxResults: The maximum number of results you want the request to return when listing the user pool clients. /// - nextToken: An identifier that was returned from the previous call to this operation, which can be used to return the next set of items in the list. - /// - userPoolId: The user pool ID for the user pool where you want to list user pool clients. + /// - userPoolId: The ID of the user pool where you want to list user pool clients. /// - logger: Logger use during operation @inlinable public func listUserPoolClients( @@ -3140,7 +3140,7 @@ public struct CognitoIdentityProvider: AWSService { /// - filter: A filter string of the form "AttributeName Filter-Type "AttributeValue". Quotation marks within the filter string must be escaped using the backslash (\) character. 
For example, "family_name = \"Reddy\"". AttributeName: The name of the attribute to search for. You can only search for one attribute at a time. Filter-Type: For an exact match, use =, for example, "given_name = \"Jon\"". For a prefix ("starts with") match, use ^=, for example, "given_name ^= \"Jon\"". AttributeValue: The attribute value that must be matched for each user. If the filter string is empty, ListUsers returns all users in the user pool. You can only search for the following standard attributes: username (case-sensitive) email phone_number name given_name family_name preferred_username cognito:user_status (called Status in the Console) (case-insensitive) status (called Enabled in the Console) (case-sensitive) sub Custom attributes aren't searchable. You can also list users with a client-side filter. The server-side filter matches no more than one attribute. For an advanced search, use a client-side filter with the --query parameter of the list-users action in the CLI. When you use a client-side filter, ListUsers returns a paginated list of zero or more users. You can receive multiple pages in a row with zero results. Repeat the query with each pagination token that is returned until you receive a null pagination token value, and then review the combined result. For more information about server-side and client-side filtering, see FilteringCLI output in the Command Line Interface User Guide. For more information, see Searching for Users Using the ListUsers API and Examples of Using the ListUsers API in the Amazon Cognito Developer Guide. /// - limit: Maximum number of users to be returned. /// - paginationToken: This API operation returns a limited number of results. The pagination token is - /// - userPoolId: The user pool ID for the user pool on which the search should be performed. + /// - userPoolId: The ID of the user pool on which the search should be performed. 
/// - logger: Logger use during operation @inlinable public func listUsers( @@ -3180,7 +3180,7 @@ public struct CognitoIdentityProvider: AWSService { /// - groupName: The name of the group. /// - limit: The maximum number of users that you want to retrieve before pagination. /// - nextToken: An identifier that was returned from the previous call to this operation, which can be used to return the next set of items in the list. - /// - userPoolId: The user pool ID for the user pool. + /// - userPoolId: The ID of the user pool. /// - logger: Logger use during operation @inlinable public func listUsersInGroup( @@ -3253,7 +3253,7 @@ public struct CognitoIdentityProvider: AWSService { /// - analyticsMetadata: The Amazon Pinpoint analytics metadata that contributes to your metrics for ResendConfirmationCode calls. /// - clientId: The ID of the client associated with the user pool. /// - clientMetadata: A map of custom key-value pairs that you can provide as input for any custom workflows that this action triggers. You create custom workflows by assigning Lambda functions to user pool triggers. When you use the ResendConfirmationCode API action, Amazon Cognito invokes the function that is assigned to the custom message trigger. When Amazon Cognito invokes this function, it passes a JSON payload, which the function receives as input. This payload contains a clientMetadata attribute, which provides the data that you assigned to the ClientMetadata parameter in your ResendConfirmationCode request. In your function code in Lambda, you can process the clientMetadata value to enhance your workflow for your specific needs. For more information, see - /// - secretHash: A keyed-hash message authentication code (HMAC) calculated using the secret key of a user pool client and username plus the client ID in the message. 
+ /// - secretHash: A keyed-hash message authentication code (HMAC) calculated using the secret key of a user pool client and username plus the client ID in the message. For more information about SecretHash, see Computing secret hash values. /// - userContextData: Contextual data about your user session, such as the device fingerprint, IP address, or location. Amazon Cognito advanced /// - username: The username of the user that you want to query or modify. The value of this parameter is typically your user's username, but it can be any of their alias attributes. If username isn't an alias attribute in your user pool, this value must be the sub of a local user or the username of a user from a third-party IdP. /// - logger: Logger use during operation @@ -3452,7 +3452,7 @@ public struct CognitoIdentityProvider: AWSService { /// - clientId: The client ID for the client app. /// - css: The CSS values in the UI customization. /// - imageFile: The uploaded logo image for the UI customization. - /// - userPoolId: The user pool ID for the user pool. + /// - userPoolId: The ID of the user pool. /// - logger: Logger use during operation @inlinable public func setUICustomization( @@ -3471,7 +3471,7 @@ public struct CognitoIdentityProvider: AWSService { return try await self.setUICustomization(input, logger: logger) } - /// Set the user's multi-factor authentication (MFA) method preference, including which MFA factors are activated and if any are preferred. Only one factor can be set as preferred. The preferred MFA factor will be used to authenticate a user if multiple factors are activated. If multiple options are activated and no preference is set, a challenge to choose an MFA option will be returned during sign-in. If an MFA type is activated for a user, the user will be prompted for MFA during all sign-in attempts unless device tracking is turned on and the device has been trusted. 
If you want MFA to be applied selectively based on the assessed risk level of sign-in attempts, deactivate MFA for users and turn on Adaptive Authentication for the user pool. Authorize this action with a signed-in user's access token. It must include the scope aws.cognito.signin.user.admin. Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints. + /// Set the user's multi-factor authentication (MFA) method preference, including which MFA factors are activated and if any are preferred. Only one factor can be set as preferred. The preferred MFA factor will be used to authenticate a user if multiple factors are activated. If multiple options are activated and no preference is set, a challenge to choose an MFA option will be returned during sign-in. If an MFA type is activated for a user, the user will be prompted for MFA during all sign-in attempts unless device tracking is turned on and the device has been trusted. If you want MFA to be applied selectively based on the assessed risk level of sign-in attempts, deactivate MFA for users and turn on Adaptive Authentication for the user pool. This operation doesn't reset an existing TOTP MFA for a user. To register a new TOTP factor for a user, make an AssociateSoftwareToken request. For more information, see TOTP software token MFA. Authorize this action with a signed-in user's access token. It must include the scope aws.cognito.signin.user.admin. Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. 
For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints. @Sendable @inlinable public func setUserMFAPreference(_ input: SetUserMFAPreferenceRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> SetUserMFAPreferenceResponse { @@ -3484,7 +3484,7 @@ public struct CognitoIdentityProvider: AWSService { logger: logger ) } - /// Set the user's multi-factor authentication (MFA) method preference, including which MFA factors are activated and if any are preferred. Only one factor can be set as preferred. The preferred MFA factor will be used to authenticate a user if multiple factors are activated. If multiple options are activated and no preference is set, a challenge to choose an MFA option will be returned during sign-in. If an MFA type is activated for a user, the user will be prompted for MFA during all sign-in attempts unless device tracking is turned on and the device has been trusted. If you want MFA to be applied selectively based on the assessed risk level of sign-in attempts, deactivate MFA for users and turn on Adaptive Authentication for the user pool. Authorize this action with a signed-in user's access token. It must include the scope aws.cognito.signin.user.admin. Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints. + /// Set the user's multi-factor authentication (MFA) method preference, including which MFA factors are activated and if any are preferred. Only one factor can be set as preferred. The preferred MFA factor will be used to authenticate a user if multiple factors are activated. 
If multiple options are activated and no preference is set, a challenge to choose an MFA option will be returned during sign-in. If an MFA type is activated for a user, the user will be prompted for MFA during all sign-in attempts unless device tracking is turned on and the device has been trusted. If you want MFA to be applied selectively based on the assessed risk level of sign-in attempts, deactivate MFA for users and turn on Adaptive Authentication for the user pool. This operation doesn't reset an existing TOTP MFA for a user. To register a new TOTP factor for a user, make an AssociateSoftwareToken request. For more information, see TOTP software token MFA. Authorize this action with a signed-in user's access token. It must include the scope aws.cognito.signin.user.admin. Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints. /// /// Parameters: /// - accessToken: A valid access token that Amazon Cognito issued to the user whose MFA preference you want to set. @@ -3605,7 +3605,7 @@ public struct CognitoIdentityProvider: AWSService { /// - clientId: The ID of the client associated with the user pool. /// - clientMetadata: A map of custom key-value pairs that you can provide as input for any custom workflows that this action triggers. You create custom workflows by assigning Lambda functions to user pool triggers. When you use the SignUp API action, Amazon Cognito invokes any functions that are assigned to the following triggers: pre sign-up, custom message, and post confirmation. When Amazon Cognito invokes any of these functions, it passes a JSON payload, which the function receives as input. 
This payload contains a clientMetadata attribute, which provides the data that you assigned to the ClientMetadata parameter in your SignUp request. In your function code in Lambda, you can process the clientMetadata value to enhance your workflow for your specific needs. For more information, see /// - password: The password of the user you want to register. Users can sign up without a password when your user pool supports passwordless sign-in with email or SMS OTPs. To create a user with no password, omit this parameter or submit a blank value. You can only create a passwordless user when passwordless sign-in is available. See the SignInPolicyType property of CreateUserPool and UpdateUserPool. - /// - secretHash: A keyed-hash message authentication code (HMAC) calculated using the secret key of a user pool client and username plus the client ID in the message. + /// - secretHash: A keyed-hash message authentication code (HMAC) calculated using the secret key of a user pool client and username plus the client ID in the message. For more information about SecretHash, see Computing secret hash values. /// - userAttributes: An array of name-value pairs representing user attributes. For custom attributes, you must prepend the custom: prefix to the attribute name. /// - userContextData: Contextual data about your user session, such as the device fingerprint, IP address, or location. Amazon Cognito advanced /// - username: The username of the user that you want to sign up. The value of this parameter is typically a username, but can be any alias attribute in your user pool. @@ -3655,7 +3655,7 @@ public struct CognitoIdentityProvider: AWSService { /// /// Parameters: /// - jobId: The job ID for the user import job. - /// - userPoolId: The user pool ID for the user pool that the users are being imported into. + /// - userPoolId: The ID of the user pool that the users are being imported into. 
/// - logger: Logger use during operation @inlinable public func startUserImportJob( @@ -3716,7 +3716,7 @@ public struct CognitoIdentityProvider: AWSService { /// /// Parameters: /// - jobId: The job ID for the user import job. - /// - userPoolId: The user pool ID for the user pool that the users are being imported into. + /// - userPoolId: The ID of the user pool that the users are being imported into. /// - logger: Logger use during operation @inlinable public func stopUserImportJob( @@ -3891,7 +3891,7 @@ public struct CognitoIdentityProvider: AWSService { /// - groupName: The name of the group. /// - precedence: The new precedence value for the group. For more information about this parameter, see CreateGroup. /// - roleArn: The new role Amazon Resource Name (ARN) for the group. This is used for setting the cognito:roles and cognito:preferred_role claims in the token. - /// - userPoolId: The user pool ID for the user pool. + /// - userPoolId: The ID of the user pool. /// - logger: Logger use during operation @inlinable public func updateGroup( @@ -3953,7 +3953,7 @@ public struct CognitoIdentityProvider: AWSService { return try await self.updateIdentityProvider(input, logger: logger) } - /// Configures the branding settings for a user pool style. This operation is the programmatic option for the configuration of a style in the branding designer. Provides values for UI customization in a Settings JSON object and image files in an Assets array. This operation has a 2-megabyte request-size limit and include the CSS settings and image assets for your app client. Your branding settings might exceed 2MB in size. Amazon Cognito doesn't require that you pass all parameters in one request and preserves existing style settings that you don't specify. If your request is larger than 2MB, separate it into multiple requests, each with a size smaller than the limit. For more information, see API and SDK operations for managed login branding. 
Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy. Learn more Signing Amazon Web Services API Requests Using the Amazon Cognito user pools API and user pool endpoints + /// Configures the branding settings for a user pool style. This operation is the programmatic option for the configuration of a style in the branding designer. Provides values for UI customization in a Settings JSON object and image files in an Assets array. This operation has a 2-megabyte request-size limit and include the CSS settings and image assets for your app client. Your branding settings might exceed 2MB in size. Amazon Cognito doesn't require that you pass all parameters in one request and preserves existing style settings that you don't specify. If your request is larger than 2MB, separate it into multiple requests, each with a size smaller than the limit. As a best practice, modify the output of DescribeManagedLoginBrandingByClient into the request parameters for this operation. To get all settings, set ReturnMergedResources to true. For more information, see API and SDK operations for managed login branding Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy. 
Learn more Signing Amazon Web Services API Requests Using the Amazon Cognito user pools API and user pool endpoints @Sendable @inlinable public func updateManagedLoginBranding(_ input: UpdateManagedLoginBrandingRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> UpdateManagedLoginBrandingResponse { @@ -3966,7 +3966,7 @@ public struct CognitoIdentityProvider: AWSService { logger: logger ) } - /// Configures the branding settings for a user pool style. This operation is the programmatic option for the configuration of a style in the branding designer. Provides values for UI customization in a Settings JSON object and image files in an Assets array. This operation has a 2-megabyte request-size limit and include the CSS settings and image assets for your app client. Your branding settings might exceed 2MB in size. Amazon Cognito doesn't require that you pass all parameters in one request and preserves existing style settings that you don't specify. If your request is larger than 2MB, separate it into multiple requests, each with a size smaller than the limit. For more information, see API and SDK operations for managed login branding. Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy. Learn more Signing Amazon Web Services API Requests Using the Amazon Cognito user pools API and user pool endpoints + /// Configures the branding settings for a user pool style. This operation is the programmatic option for the configuration of a style in the branding designer. Provides values for UI customization in a Settings JSON object and image files in an Assets array. This operation has a 2-megabyte request-size limit and include the CSS settings and image assets for your app client. Your branding settings might exceed 2MB in size. 
Amazon Cognito doesn't require that you pass all parameters in one request and preserves existing style settings that you don't specify. If your request is larger than 2MB, separate it into multiple requests, each with a size smaller than the limit. As a best practice, modify the output of DescribeManagedLoginBrandingByClient into the request parameters for this operation. To get all settings, set ReturnMergedResources to true. For more information, see API and SDK operations for managed login branding Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy. Learn more Signing Amazon Web Services API Requests Using the Amazon Cognito user pools API and user pool endpoints /// /// Parameters: /// - assets: An array of image files that you want to apply to roles like backgrounds, logos, and icons. Each object must also indicate whether it is for dark mode, light mode, or browser-adaptive mode. @@ -4013,7 +4013,7 @@ public struct CognitoIdentityProvider: AWSService { /// - identifier: A unique resource server identifier for the resource server. The identifier can be an API friendly name like solar-system-data. You can also set an API URL like https://solar-system-data-api.example.com as your identifier. Amazon Cognito represents scopes in the access token in the format $resource-server-identifier/$scope. Longer scope-identifier strings increase the size of your access tokens. /// - name: The name of the resource server. /// - scopes: The scope values to be set for the resource server. - /// - userPoolId: The user pool ID for the user pool. + /// - userPoolId: The ID of the user pool. 
/// - logger: Logger use during operation @inlinable public func updateResourceServer( @@ -4100,7 +4100,7 @@ public struct CognitoIdentityProvider: AWSService { /// - smsVerificationMessage: This parameter is no longer used. See VerificationMessageTemplateType. /// - userAttributeUpdateSettings: The settings for updates to user attributes. These settings include the property AttributesRequireVerificationBeforeUpdate, /// - userPoolAddOns: User pool add-ons. Contains settings for activation of advanced security features. To log user security information but take no action, set to AUDIT. To configure automatic security responses to risky traffic to your user pool, set to ENFORCED. For more information, see Adding advanced security to a user pool. - /// - userPoolId: The user pool ID for the user pool you want to update. + /// - userPoolId: The ID of the user pool you want to update. /// - userPoolTags: The tag keys and values to assign to the user pool. A tag is a label that you can use to categorize and manage user pools in different ways, such as by purpose, owner, environment, or other criteria. /// - userPoolTier: The user pool feature plan, or tier. This parameter determines the eligibility of the user pool for features like managed login, access-token customization, and threat protection. Defaults to ESSENTIALS. /// - verificationMessageTemplate: The template for verification messages. @@ -4190,9 +4190,9 @@ public struct CognitoIdentityProvider: AWSService { /// - preventUserExistenceErrors: Errors and responses that you want Amazon Cognito APIs to return during authentication, account confirmation, and password recovery when the user doesn't exist in the user pool. When set to ENABLED and the user doesn't exist, authentication returns an error indicating either the username or password was incorrect. Account confirmation and password recovery return a response indicating a code was sent to a simulated destination. 
When set to LEGACY, those APIs return a UserNotFoundException exception if the user doesn't exist in the user pool. Valid values include: ENABLED - This prevents user existence-related errors. LEGACY - This represents the early behavior of Amazon Cognito where user existence related errors aren't prevented. Defaults to LEGACY when you don't provide a value. /// - readAttributes: The list of user attributes that you want your app client to have read access to. After your user authenticates in your app, their access token authorizes them to read their own attribute value for any attribute in this list. An example of this kind of activity is when your user selects a link to view their profile information. Your app makes a GetUser API request to retrieve and display your user's profile data. When you don't specify the ReadAttributes for your app client, your app can read the values of email_verified, phone_number_verified, and the Standard attributes of your user pool. When your user pool app client has read access to these default attributes, ReadAttributes doesn't return any information. Amazon Cognito only populates ReadAttributes in the API response if you have specified your own custom set of read attributes. /// - refreshTokenValidity: The refresh token time limit. After this limit expires, your user can't use - /// - supportedIdentityProviders: A list of provider names for the identity providers (IdPs) that are supported on this client. The following are supported: COGNITO, Facebook, Google, SignInWithApple, and LoginWithAmazon. You can also specify the names that you configured for the SAML and OIDC IdPs in your user pool, for example MySAMLIdP or MyOIDCIdP. This setting applies to providers that you can access with the hosted UI and OAuth 2.0 authorization server. The removal of COGNITO from this list doesn't prevent authentication operations for local users with the user pools API in an Amazon Web Services SDK. 
The only way to prevent API-based authentication is to block access with a WAF rule. + /// - supportedIdentityProviders: A list of provider names for the identity providers (IdPs) that are supported on this client. The following are supported: COGNITO, Facebook, Google, SignInWithApple, and LoginWithAmazon. You can also specify the names that you configured for the SAML and OIDC IdPs in your user pool, for example MySAMLIdP or MyOIDCIdP. This setting applies to providers that you can access with managed login. The removal of COGNITO from this list doesn't prevent authentication operations for local users with the user pools API in an Amazon Web Services SDK. The only way to prevent API-based authentication is to block access with a WAF rule. /// - tokenValidityUnits: The time units you use when you set the duration of ID, access, and refresh tokens. The default unit for RefreshToken is days, and the default for ID and access tokens is hours. - /// - userPoolId: The user pool ID for the user pool where you want to update the user pool client. + /// - userPoolId: The ID of the user pool where you want to update the user pool client. /// - writeAttributes: The list of user attributes that you want your app client to have write access to. After your user authenticates in your app, their access token authorizes them to set or modify their own attribute value for any attribute in this list. An example of this kind of activity is when you present your user with a form to update their profile information and they change their last name. Your app then makes an UpdateUserAttributes API request and sets family_name to the new value. When you don't specify the WriteAttributes for your app client, your app can write the values of the Standard attributes of your user pool. When your user pool has write access to these default attributes, WriteAttributes doesn't return any information. 
Amazon Cognito only populates WriteAttributes in the API response if you have specified your own custom set of write attributes. If your app client allows users to sign in through an IdP, this array must include all attributes that you have mapped to IdP attributes. Amazon Cognito updates mapped attributes when users sign in to your application through an IdP. If your app client does not have write access to a mapped attribute, Amazon Cognito throws an error when it tries to update the attribute. For more information, see Specifying IdP Attribute Mappings for Your user pool. /// - logger: Logger use during operation @inlinable @@ -4248,7 +4248,7 @@ public struct CognitoIdentityProvider: AWSService { return try await self.updateUserPoolClient(input, logger: logger) } - /// Updates the Secure Sockets Layer (SSL) certificate for the custom domain for your user pool. You can use this operation to provide the Amazon Resource Name (ARN) of a new certificate to Amazon Cognito. You can't use it to change the domain for a user pool. A custom domain is used to host the Amazon Cognito hosted UI, which provides sign-up and sign-in pages for your application. When you set up a custom domain, you provide a certificate that you manage with Certificate Manager (ACM). When necessary, you can use this operation to change the certificate that you applied to your custom domain. Usually, this is unnecessary following routine certificate renewal with ACM. When you renew your existing certificate in ACM, the ARN for your certificate remains the same, and your custom domain uses the new certificate automatically. However, if you replace your existing certificate with a new one, ACM gives the new certificate a new ARN. To apply the new certificate to your custom domain, you must provide this ARN to Amazon Cognito. When you add your new certificate in ACM, you must choose US East (N. Virginia) as the Amazon Web Services Region. 
After you submit your request, Amazon Cognito requires up to 1 hour to distribute your new certificate to your custom domain. For more information about adding a custom domain to your user pool, see Using Your Own Domain for the Hosted UI. Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy. Learn more Signing Amazon Web Services API Requests Using the Amazon Cognito user pools API and user pool endpoints + /// A user pool domain hosts managed login, an authorization server and web server for authentication in your application. This operation updates the branding version for user pool domains between 1 for hosted UI (classic) and 2 for managed login. It also updates the SSL certificate for user pool custom domains. Changes to the domain branding version take up to one minute to take effect for a prefix domain and up to five minutes for a custom domain. This operation doesn't change the name of your user pool domain. To change your domain, delete it with DeleteUserPoolDomain and create a new domain with CreateUserPoolDomain. You can pass the ARN of a new Certificate Manager certificate in this request. Typically, ACM certificates automatically renew and your user pool can continue to use the same ARN. But if you generate a new certificate for your custom domain name, replace the original configuration with the new ARN in this request. ACM certificates for custom domains must be in the US East (N. Virginia) Amazon Web Services Region. After you submit your request, Amazon Cognito requires up to 1 hour to distribute your new certificate to your custom domain. For more information about adding a custom domain to your user pool, see Configuring a user pool domain. Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation.
For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy. Learn more Signing Amazon Web Services API Requests Using the Amazon Cognito user pools API and user pool endpoints @Sendable @inlinable public func updateUserPoolDomain(_ input: UpdateUserPoolDomainRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> UpdateUserPoolDomainResponse { @@ -4261,7 +4261,7 @@ public struct CognitoIdentityProvider: AWSService { logger: logger ) } - /// Updates the Secure Sockets Layer (SSL) certificate for the custom domain for your user pool. You can use this operation to provide the Amazon Resource Name (ARN) of a new certificate to Amazon Cognito. You can't use it to change the domain for a user pool. A custom domain is used to host the Amazon Cognito hosted UI, which provides sign-up and sign-in pages for your application. When you set up a custom domain, you provide a certificate that you manage with Certificate Manager (ACM). When necessary, you can use this operation to change the certificate that you applied to your custom domain. Usually, this is unnecessary following routine certificate renewal with ACM. When you renew your existing certificate in ACM, the ARN for your certificate remains the same, and your custom domain uses the new certificate automatically. However, if you replace your existing certificate with a new one, ACM gives the new certificate a new ARN. To apply the new certificate to your custom domain, you must provide this ARN to Amazon Cognito. When you add your new certificate in ACM, you must choose US East (N. Virginia) as the Amazon Web Services Region. After you submit your request, Amazon Cognito requires up to 1 hour to distribute your new certificate to your custom domain. For more information about adding a custom domain to your user pool, see Using Your Own Domain for the Hosted UI. 
Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy. Learn more Signing Amazon Web Services API Requests Using the Amazon Cognito user pools API and user pool endpoints + /// A user pool domain hosts managed login, an authorization server and web server for authentication in your application. This operation updates the branding version for user pool domains between 1 for hosted UI (classic) and 2 for managed login. It also updates the SSL certificate for user pool custom domains. Changes to the domain branding version take up to one minute to take effect for a prefix domain and up to five minutes for a custom domain. This operation doesn't change the name of your user pool domain. To change your domain, delete it with DeleteUserPoolDomain and create a new domain with CreateUserPoolDomain. You can pass the ARN of a new Certificate Manager certificate in this request. Typically, ACM certificates automatically renew and your user pool can continue to use the same ARN. But if you generate a new certificate for your custom domain name, replace the original configuration with the new ARN in this request. ACM certificates for custom domains must be in the US East (N. Virginia) Amazon Web Services Region. After you submit your request, Amazon Cognito requires up to 1 hour to distribute your new certificate to your custom domain. For more information about adding a custom domain to your user pool, see Configuring a user pool domain. Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy.
Learn more Signing Amazon Web Services API Requests Using the Amazon Cognito user pools API and user pool endpoints /// /// Parameters: /// - customDomainConfig: The configuration for a custom domain that hosts the sign-up and sign-in pages for your application. Use this object to specify an SSL certificate that is managed by ACM. When you create a custom domain, the passkey RP ID defaults to the custom domain. If you had a prefix domain active, this will cause passkey integration for your prefix domain to stop working due to a mismatch in RP ID. To keep the prefix domain passkey integration working, you can explicitly set RP ID to the prefix domain. Update the RP ID in a SetUserPoolMfaConfig request. @@ -4271,7 +4271,7 @@ public struct CognitoIdentityProvider: AWSService { /// - logger: Logger use during operation @inlinable public func updateUserPoolDomain( - customDomainConfig: CustomDomainConfigType, + customDomainConfig: CustomDomainConfigType? = nil, domain: String, managedLoginVersion: Int? = nil, userPoolId: String, @@ -4394,9 +4394,9 @@ extension CognitoIdentityProvider { /// Return PaginatorSequence for operation ``adminListGroupsForUser(_:logger:)``. /// /// - Parameters: - /// - limit: The limit of the request to list groups. + /// - limit: The maximum number of groups that you want Amazon Cognito to return in the response. /// - username: The username of the user that you want to query or modify. The value of this parameter is typically your user's username, but it can be any of their alias attributes. If username isn't an alias attribute in your user pool, this value must be the sub of a local user or the username of a user from a third-party IdP. - /// - userPoolId: The user pool ID for the user pool. + /// - userPoolId: The ID of the user pool where you want to view a user's groups. 
/// - logger: Logger used for logging @inlinable public func adminListGroupsForUserPaginator( @@ -4436,7 +4436,7 @@ extension CognitoIdentityProvider { /// - Parameters: /// - maxResults: The maximum number of authentication events to return. Returns 60 events if you set MaxResults to 0, or if you don't include a MaxResults parameter. /// - username: The username of the user that you want to query or modify. The value of this parameter is typically your user's username, but it can be any of their alias attributes. If username isn't an alias attribute in your user pool, this value must be the sub of a local user or the username of a user from a third-party IdP. - /// - userPoolId: The user pool ID. + /// - userPoolId: The Id of the user pool that contains the user profile with the logged events. /// - logger: Logger used for logging @inlinable public func adminListUserAuthEventsPaginator( @@ -4475,7 +4475,7 @@ extension CognitoIdentityProvider { /// /// - Parameters: /// - limit: The limit of the request to list groups. - /// - userPoolId: The user pool ID for the user pool. + /// - userPoolId: The ID of the user pool. /// - logger: Logger used for logging @inlinable public func listGroupsPaginator( @@ -4549,7 +4549,7 @@ extension CognitoIdentityProvider { /// /// - Parameters: /// - maxResults: The maximum number of resource servers to return. - /// - userPoolId: The user pool ID for the user pool. + /// - userPoolId: The ID of the user pool. /// - logger: Logger used for logging @inlinable public func listResourceServersPaginator( @@ -4586,7 +4586,7 @@ extension CognitoIdentityProvider { /// /// - Parameters: /// - maxResults: The maximum number of results you want the request to return when listing the user pool clients. - /// - userPoolId: The user pool ID for the user pool where you want to list user pool clients. + /// - userPoolId: The ID of the user pool where you want to list user pool clients. 
/// - logger: Logger used for logging @inlinable public func listUserPoolClientsPaginator( @@ -4659,7 +4659,7 @@ extension CognitoIdentityProvider { /// - attributesToGet: A JSON array of user attribute names, for example given_name, that you want Amazon Cognito to include in the response for each user. When you don't provide an AttributesToGet parameter, Amazon Cognito returns all attributes for each user. Use AttributesToGet with required attributes in your user pool, or in conjunction with Filter. Amazon Cognito returns an error if not all users in the results have set a value for the attribute you request. Attributes that you can't filter on, including custom attributes, must have a value set in every user profile before an AttributesToGet parameter returns results. /// - filter: A filter string of the form "AttributeName Filter-Type "AttributeValue". Quotation marks within the filter string must be escaped using the backslash (\) character. For example, "family_name = \"Reddy\"". AttributeName: The name of the attribute to search for. You can only search for one attribute at a time. Filter-Type: For an exact match, use =, for example, "given_name = \"Jon\"". For a prefix ("starts with") match, use ^=, for example, "given_name ^= \"Jon\"". AttributeValue: The attribute value that must be matched for each user. If the filter string is empty, ListUsers returns all users in the user pool. You can only search for the following standard attributes: username (case-sensitive) email phone_number name given_name family_name preferred_username cognito:user_status (called Status in the Console) (case-insensitive) status (called Enabled in the Console) (case-sensitive) sub Custom attributes aren't searchable. You can also list users with a client-side filter. The server-side filter matches no more than one attribute. For an advanced search, use a client-side filter with the --query parameter of the list-users action in the CLI. 
When you use a client-side filter, ListUsers returns a paginated list of zero or more users. You can receive multiple pages in a row with zero results. Repeat the query with each pagination token that is returned until you receive a null pagination token value, and then review the combined result. For more information about server-side and client-side filtering, see FilteringCLI output in the Command Line Interface User Guide. For more information, see Searching for Users Using the ListUsers API and Examples of Using the ListUsers API in the Amazon Cognito Developer Guide. /// - limit: Maximum number of users to be returned. - /// - userPoolId: The user pool ID for the user pool on which the search should be performed. + /// - userPoolId: The ID of the user pool on which the search should be performed. /// - logger: Logger used for logging @inlinable public func listUsersPaginator( @@ -4701,7 +4701,7 @@ extension CognitoIdentityProvider { /// - Parameters: /// - groupName: The name of the group. /// - limit: The maximum number of users that you want to retrieve before pagination. - /// - userPoolId: The user pool ID for the user pool. + /// - userPoolId: The ID of the user pool. /// - logger: Logger used for logging @inlinable public func listUsersInGroupPaginator( diff --git a/Sources/Soto/Services/CognitoIdentityProvider/CognitoIdentityProvider_shapes.swift b/Sources/Soto/Services/CognitoIdentityProvider/CognitoIdentityProvider_shapes.swift index b91f164925..19fa6462b2 100644 --- a/Sources/Soto/Services/CognitoIdentityProvider/CognitoIdentityProvider_shapes.swift +++ b/Sources/Soto/Services/CognitoIdentityProvider/CognitoIdentityProvider_shapes.swift @@ -477,9 +477,9 @@ extension CognitoIdentityProvider { } public struct AddCustomAttributesRequest: AWSEncodableShape { - /// An array of custom attributes, such as Mutable and Name. + /// An array of custom attribute names and other properties. 
Sets the following characteristics: AttributeDataType The expected data type. Can be a string, a number, a date and time, or a boolean. Mutable If true, you can grant app clients write access to the attribute value. If false, the attribute value can only be set up on sign-up or administrator creation of users. Name The attribute name. For an attribute like custom:myAttribute, enter myAttribute for this field. Required When true, users who sign up or are created must set a value for the attribute. NumberAttributeConstraints The minimum and maximum length of accepted values for a Number-type attribute. StringAttributeConstraints The minimum and maximum length of accepted values for a String-type attribute. DeveloperOnlyAttribute This legacy option creates an attribute with a dev: prefix. You can only set the value of a developer-only attribute with administrative IAM credentials. public let customAttributes: [SchemaAttributeType] - /// The user pool ID for the user pool where you want to add custom attributes. + /// The ID of the user pool where you want to add custom attributes. public let userPoolId: String @inlinable @@ -514,7 +514,7 @@ extension CognitoIdentityProvider { public let groupName: String /// The username of the user that you want to query or modify. The value of this parameter is typically your user's username, but it can be any of their alias attributes. If username isn't an alias attribute in your user pool, this value must be the sub of a local user or the username of a user from a third-party IdP. public let username: String - /// The user pool ID for the user pool. + /// The ID of the user pool that contains the group that you want to add the user to. public let userPoolId: String @inlinable @@ -545,11 +545,11 @@ extension CognitoIdentityProvider { public struct AdminConfirmSignUpRequest: AWSEncodableShape { /// A map of custom key-value pairs that you can provide as input for any custom workflows that this action triggers. 
If your user pool configuration includes triggers, the AdminConfirmSignUp API action invokes the Lambda function that is specified for the post confirmation trigger. When Amazon Cognito invokes this function, it passes a JSON payload, which the function receives as input. In this payload, the clientMetadata attribute provides the data that you assigned to the ClientMetadata parameter in your AdminConfirmSignUp request. In your function code in Lambda, you can process the ClientMetadata value to enhance your workflow for your specific needs. For more information, see - /// Customizing user pool Workflows with Lambda Triggers in the Amazon Cognito Developer Guide. When you use the ClientMetadata parameter, remember that Amazon Cognito won't do the following: Store the ClientMetadata value. This data is available only to Lambda triggers that are assigned to a user pool to support custom workflows. If your user pool configuration doesn't include triggers, the ClientMetadata parameter serves no purpose. Validate the ClientMetadata value. Encrypt the ClientMetadata value. Don't use Amazon Cognito to provide sensitive information. + /// Customizing user pool Workflows with Lambda Triggers in the Amazon Cognito Developer Guide. When you use the ClientMetadata parameter, note that Amazon Cognito won't do the following: Store the ClientMetadata value. This data is available only to Lambda triggers that are assigned to a user pool to support custom workflows. If your user pool configuration doesn't include triggers, the ClientMetadata parameter serves no purpose. Validate the ClientMetadata value. Encrypt the ClientMetadata value. Don't send sensitive information in this parameter. public let clientMetadata: [String: String]? /// The username of the user that you want to query or modify. The value of this parameter is typically your user's username, but it can be any of their alias attributes. 
If username isn't an alias attribute in your user pool, this value must be the sub of a local user or the username of a user from a third-party IdP. public let username: String - /// The user pool ID for which you want to confirm user registration. + /// The ID of the user pool where you want to confirm a user's sign-up request. public let userPoolId: String @inlinable @@ -612,14 +612,14 @@ extension CognitoIdentityProvider { } public struct AdminCreateUserRequest: AWSEncodableShape { - /// A map of custom key-value pairs that you can provide as input for any custom workflows that this action triggers. You create custom workflows by assigning Lambda functions to user pool triggers. When you use the AdminCreateUser API action, Amazon Cognito invokes the function that is assigned to the pre sign-up trigger. When Amazon Cognito invokes this function, it passes a JSON payload, which the function receives as input. This payload contains a clientMetadata attribute, which provides the data that you assigned to the ClientMetadata parameter in your AdminCreateUser request. In your function code in Lambda, you can process the clientMetadata value to enhance your workflow for your specific needs. For more information, see - /// Customizing user pool Workflows with Lambda Triggers in the Amazon Cognito Developer Guide. When you use the ClientMetadata parameter, remember that Amazon Cognito won't do the following: Store the ClientMetadata value. This data is available only to Lambda triggers that are assigned to a user pool to support custom workflows. If your user pool configuration doesn't include triggers, the ClientMetadata parameter serves no purpose. Validate the ClientMetadata value. Encrypt the ClientMetadata value. Don't use Amazon Cognito to provide sensitive information. + /// A map of custom key-value pairs that you can provide as input for any custom workflows that this action triggers. 
You create custom workflows by assigning Lambda functions to user pool triggers. When you use the AdminCreateUser API action, Amazon Cognito invokes the function that is assigned to the pre sign-up trigger. When Amazon Cognito invokes this function, it passes a JSON payload, which the function receives as input. This payload contains a ClientMetadata attribute, which provides the data that you assigned to the ClientMetadata parameter in your AdminCreateUser request. In your function code in Lambda, you can process the clientMetadata value to enhance your workflow for your specific needs. For more information, see + /// Customizing user pool Workflows with Lambda Triggers in the Amazon Cognito Developer Guide. When you use the ClientMetadata parameter, note that Amazon Cognito won't do the following: Store the ClientMetadata value. This data is available only to Lambda triggers that are assigned to a user pool to support custom workflows. If your user pool configuration doesn't include triggers, the ClientMetadata parameter serves no purpose. Validate the ClientMetadata value. Encrypt the ClientMetadata value. Don't send sensitive information in this parameter. public let clientMetadata: [String: String]? - /// Specify "EMAIL" if email will be used to send the welcome message. Specify "SMS" if the phone number will be used. The default value is "SMS". You can specify more than one value. + /// Specify EMAIL if email will be used to send the welcome message. Specify SMS if the phone number will be used. The default value is SMS. You can specify more than one value. public let desiredDeliveryMediums: [DeliveryMediumType]? - /// This parameter is used only if the phone_number_verified or email_verified attribute is set to True. Otherwise, it is ignored. 
If this parameter is set to True and the phone number or email address specified in the UserAttributes parameter already exists as an alias with a different user, the API call will migrate the alias from the previous user to the newly created user. The previous user will no longer be able to log in using that alias. If this parameter is set to False, the API throws an AliasExistsException error if the alias already exists. The default value is False. + /// This parameter is used only if the phone_number_verified or email_verified attribute is set to True. Otherwise, it is ignored. If this parameter is set to True and the phone number or email address specified in the UserAttributes parameter already exists as an alias with a different user, this request migrates the alias from the previous user to the newly-created user. The previous user will no longer be able to log in using that alias. If this parameter is set to False, the API throws an AliasExistsException error if the alias already exists. The default value is False. public let forceAliasCreation: Bool? - /// Set to RESEND to resend the invitation message to a user that already exists and reset the expiration limit on the user's account. Set to SUPPRESS to suppress sending the message. You can specify only one value. + /// Set to RESEND to resend the invitation message to a user that already exists, and to reset the temporary-password duration with a new temporary password. Set to SUPPRESS to suppress sending the message. You can specify only one value. public let messageAction: MessageActionType? /// The user's temporary password. This password must conform to the password policy that you specified when you created the user pool. The exception to the requirement for a password is when your user pool supports passwordless sign-in with email or SMS OTPs. To create a user with no password, omit this parameter or submit a blank value. 
You can only create a passwordless user when passwordless sign-in is available. See the SignInPolicyType property of CreateUserPool and UpdateUserPool. The temporary password is valid only once. To complete the Admin Create User flow, the user must enter the temporary password in the sign-in page, along with a new password to be used in all future sign-ins. If you don't specify a value, Amazon Cognito generates one for you unless you have passwordless options active for your user pool. The temporary password can only be used until the user account expiration limit that you set for your user pool. To reset the account after that time limit, you must call AdminCreateUser again and specify RESEND for the MessageAction parameter. public let temporaryPassword: String? @@ -627,7 +627,7 @@ extension CognitoIdentityProvider { public let userAttributes: [AttributeType]? /// The value that you want to set as the username sign-in attribute. The following conditions apply to the username parameter. The username can't be a duplicate of another username in the same user pool. You can't change the value of a username after you create it. You can only provide a value if usernames are a valid sign-in attribute for your user pool. If your user pool only supports phone numbers or email addresses as sign-in attributes, Amazon Cognito automatically generates a username value. For more information, see Customizing sign-in attributes. public let username: String - /// The user pool ID for the user pool where the user will be created. + /// The ID of the user pool where you want to create a user. public let userPoolId: String /// Temporary user attributes that contribute to the outcomes of your pre sign-up Lambda trigger. This set of key-value pairs are for custom validation of information that you collect from your users but don't need to retain. Your Lambda function can analyze this additional data and act on it. 
Your function might perform external API operations like logging user attributes and validation data to Amazon CloudWatch Logs. Validation data might also affect the response that your function returns to Amazon Cognito, like automatically confirming the user if they sign up from within your network. For more information about the pre sign-up Lambda trigger, see Pre sign-up Lambda trigger. public let validationData: [AttributeType]? @@ -680,7 +680,7 @@ extension CognitoIdentityProvider { } public struct AdminCreateUserResponse: AWSDecodableShape { - /// The newly created user. + /// The new user's profile details. public let user: UserType? @inlinable @@ -698,7 +698,7 @@ extension CognitoIdentityProvider { public let userAttributeNames: [String] /// The username of the user that you want to query or modify. The value of this parameter is typically your user's username, but it can be any of their alias attributes. If username isn't an alias attribute in your user pool, this value must be the sub of a local user or the username of a user from a third-party IdP. public let username: String - /// The user pool ID for the user pool where you want to delete user attributes. + /// The ID of the user pool where you want to delete user attributes. public let userPoolId: String @inlinable @@ -736,7 +736,7 @@ extension CognitoIdentityProvider { public struct AdminDeleteUserRequest: AWSEncodableShape { /// The username of the user that you want to query or modify. The value of this parameter is typically your user's username, but it can be any of their alias attributes. If username isn't an alias attribute in your user pool, this value must be the sub of a local user or the username of a user from a third-party IdP. public let username: String - /// The user pool ID for the user pool where you want to delete the user. + /// The ID of the user pool where you want to delete the user. 
public let userPoolId: String @inlinable @@ -761,9 +761,9 @@ extension CognitoIdentityProvider { } public struct AdminDisableProviderForUserRequest: AWSEncodableShape { - /// The user to be disabled. + /// The user profile that you want to delete a linked identity from. public let user: ProviderUserIdentifierType - /// The user pool ID for the user pool. + /// The ID of the user pool where you want to delete the user's linked identities. public let userPoolId: String @inlinable @@ -790,7 +790,7 @@ extension CognitoIdentityProvider { public struct AdminDisableUserRequest: AWSEncodableShape { /// The username of the user that you want to query or modify. The value of this parameter is typically your user's username, but it can be any of their alias attributes. If username isn't an alias attribute in your user pool, this value must be the sub of a local user or the username of a user from a third-party IdP. public let username: String - /// The user pool ID for the user pool where you want to disable the user. + /// The ID of the user pool where you want to disable the user. public let userPoolId: String @inlinable @@ -821,7 +821,7 @@ extension CognitoIdentityProvider { public struct AdminEnableUserRequest: AWSEncodableShape { /// The username of the user that you want to query or modify. The value of this parameter is typically your user's username, but it can be any of their alias attributes. If username isn't an alias attribute in your user pool, this value must be the sub of a local user or the username of a user from a third-party IdP. public let username: String - /// The user pool ID for the user pool where you want to enable the user. + /// The ID of the user pool where you want to activate sign-in for the user. public let userPoolId: String @inlinable @@ -850,11 +850,11 @@ extension CognitoIdentityProvider { } public struct AdminForgetDeviceRequest: AWSEncodableShape { - /// The device key. + /// The key ID of the device that you want to delete. 
You can get device keys in the response to an AdminListDevices request. public let deviceKey: String /// The username of the user that you want to query or modify. The value of this parameter is typically your user's username, but it can be any of their alias attributes. If username isn't an alias attribute in your user pool, this value must be the sub of a local user or the username of a user from a third-party IdP. public let username: String - /// The user pool ID. + /// The ID of the user pool where the device owner is a user. public let userPoolId: String @inlinable @@ -884,11 +884,11 @@ extension CognitoIdentityProvider { public struct AdminGetDeviceRequest: AWSEncodableShape { - /// The device key. + /// The key of the device that you want to get information about. You can get device keys in the response to an AdminListDevices request. public let deviceKey: String /// The username of the user that you want to query or modify. The value of this parameter is typically your user's username, but it can be any of their alias attributes. If username isn't an alias attribute in your user pool, this value must be the sub of a local user or the username of a user from a third-party IdP. public let username: String - /// The user pool ID. + /// The ID of the user pool where the device owner is a user. public let userPoolId: String @inlinable @@ -918,7 +918,7 @@ extension CognitoIdentityProvider { } public struct AdminGetDeviceResponse: AWSDecodableShape { - /// The device. + /// Details of the requested device. Includes device information, last-accessed and created dates, and the device key. public let device: DeviceType @inlinable @@ -934,7 +934,7 @@ extension CognitoIdentityProvider { public struct AdminGetUserRequest: AWSEncodableShape { /// The username of the user that you want to query or modify. The value of this parameter is typically your user's username, but it can be any of their alias attributes. 
If username isn't an alias attribute in your user pool, this value must be the sub of a local user or the username of a user from a third-party IdP. public let username: String - /// The user pool ID for the user pool where you want to get information about the user. + /// The ID of the user pool where you want to get information about the user. public let userPoolId: String @inlinable @@ -959,24 +959,25 @@ extension CognitoIdentityProvider { } public struct AdminGetUserResponse: AWSDecodableShape { - /// Indicates that the status is enabled. + /// Indicates whether the user is activated for sign-in. The AdminDisableUser and AdminEnableUser API operations deactivate and activate user sign-in, respectively. public let enabled: Bool? /// This response parameter is no longer supported. It provides information only about SMS MFA configurations. It doesn't provide information about time-based one-time password (TOTP) software token MFA configurations. To look up information about either type of MFA configuration, use UserMFASettingList instead. public let mfaOptions: [MFAOptionType]? - /// The user's preferred MFA setting. + /// The user's preferred MFA. Users can prefer SMS message, email message, or TOTP MFA. public let preferredMfaSetting: String? - /// An array of name-value pairs representing user attributes. + /// An array of name-value pairs of user attributes and their values, for example "email": "testuser@example.com". public let userAttributes: [AttributeType]? - /// The date the user was created. + /// The date and time when the item was created. Amazon Cognito returns this timestamp in UNIX epoch time format. Your SDK might render the output in a + /// human-readable format like ISO 8601 or a Java Date object. public let userCreateDate: Date? /// The date and time when the item was modified. Amazon Cognito returns this timestamp in UNIX epoch time format. Your SDK might render the output in a /// human-readable format like ISO 8601 or a Java Date object. 
public let userLastModifiedDate: Date? - /// The MFA options that are activated for the user. The possible values in this list are SMS_MFA, EMAIL_OTP, and SOFTWARE_TOKEN_MFA. + /// The MFA options that are activated for the user. The possible values in this list are SMS_MFA, EMAIL_OTP, and SOFTWARE_TOKEN_MFA. You can change the MFA preference for users who have more than one available MFA factor with AdminSetUserMFAPreference or SetUserMFAPreference. public let userMFASettingList: [String]? /// The username of the user that you requested. public let username: String - /// The user status. Can be one of the following: UNCONFIRMED - User has been created but not confirmed. CONFIRMED - User has been confirmed. UNKNOWN - User status isn't known. RESET_REQUIRED - User is confirmed, but the user must request a code and reset their password before they can sign in. FORCE_CHANGE_PASSWORD - The user is confirmed and the user can sign in using a temporary password, but on first sign-in, the user must change their password to a new value before doing anything else. + /// The user's status. Can be one of the following: UNCONFIRMED - User has been created but not confirmed. CONFIRMED - User has been confirmed. UNKNOWN - User status isn't known. RESET_REQUIRED - User is confirmed, but the user must request a code and reset their password before they can sign in. FORCE_CHANGE_PASSWORD - The user is confirmed and the user can sign in using a temporary password, but on first sign-in, the user must change their password to a new value before doing anything else. EXTERNAL_PROVIDER - The user signed in with a third-party identity provider. public let userStatus: UserStatusType? @inlinable @@ -1006,24 +1007,25 @@ extension CognitoIdentityProvider { } public struct AdminInitiateAuthRequest: AWSEncodableShape { - /// The analytics metadata for collecting Amazon Pinpoint metrics for AdminInitiateAuth calls. + /// The analytics metadata for collecting Amazon Pinpoint metrics. 
public let analyticsMetadata: AnalyticsMetadataType? - /// The authentication flow that you want to initiate. The AuthParameters that you must submit are linked to the flow that you submit. For example: USER_AUTH: Request a preferred authentication type or review available authentication types. From the offered authentication types, select one in a challenge response and then authenticate with that method in an additional challenge response. REFRESH_TOKEN_AUTH: Receive new ID and access tokens when you pass a REFRESH_TOKEN parameter with a valid refresh token as the value. USER_SRP_AUTH: Receive secure remote password (SRP) variables for the next challenge, PASSWORD_VERIFIER, when you pass USERNAME and SRP_A parameters.. ADMIN_USER_PASSWORD_AUTH: Receive new tokens or the next challenge, for example SOFTWARE_TOKEN_MFA, when you pass USERNAME and PASSWORD parameters. Valid values include the following: USER_AUTH The entry point for sign-in with passwords, one-time passwords, biometric devices, and security keys. USER_SRP_AUTH Username-password authentication with the Secure Remote Password (SRP) protocol. For more information, see Use SRP password verification in custom authentication flow. REFRESH_TOKEN_AUTH and REFRESH_TOKEN Provide a valid refresh token and receive new ID and access tokens. For more information, see Using the refresh token. CUSTOM_AUTH Custom authentication with Lambda triggers. For more information, see Custom authentication challenge Lambda triggers. ADMIN_USER_PASSWORD_AUTH Username-password authentication with the password sent directly in the request. For more information, see Admin authentication flow. USER_PASSWORD_AUTH is a flow type of InitiateAuth and isn't valid for AdminInitiateAuth. + /// The authentication flow that you want to initiate. Each AuthFlow has linked AuthParameters that you must submit. The following are some example flows and their parameters. 
USER_AUTH: Request a preferred authentication type or review available authentication types. From the offered authentication types, select one in a challenge response and then authenticate with that method in an additional challenge response. REFRESH_TOKEN_AUTH: Receive new ID and access tokens when you pass a REFRESH_TOKEN parameter with a valid refresh token as the value. USER_SRP_AUTH: Receive secure remote password (SRP) variables for the next challenge, PASSWORD_VERIFIER, when you pass USERNAME and SRP_A parameters.. ADMIN_USER_PASSWORD_AUTH: Receive new tokens or the next challenge, for example SOFTWARE_TOKEN_MFA, when you pass USERNAME and PASSWORD parameters. All flows USER_AUTH The entry point for sign-in with passwords, one-time passwords, and WebAuthN authenticators. USER_SRP_AUTH Username-password authentication with the Secure Remote Password (SRP) protocol. For more information, see Use SRP password verification in custom authentication flow. REFRESH_TOKEN_AUTH and REFRESH_TOKEN Provide a valid refresh token and receive new ID and access tokens. For more information, see Using the refresh token. CUSTOM_AUTH Custom authentication with Lambda triggers. For more information, see Custom authentication challenge Lambda triggers. ADMIN_USER_PASSWORD_AUTH Username-password authentication with the password sent directly in the request. For more information, see Admin authentication flow. USER_PASSWORD_AUTH is a flow type of InitiateAuth and isn't valid for AdminInitiateAuth. public let authFlow: AuthFlowType /// The authentication parameters. These are inputs corresponding to the AuthFlow that you're invoking. The required values depend on the value of AuthFlow: For USER_AUTH: USERNAME (required), PREFERRED_CHALLENGE. If you don't provide a value for PREFERRED_CHALLENGE, Amazon Cognito responds with the AvailableChallenges parameter that specifies the available sign-in methods. 
For USER_SRP_AUTH: USERNAME (required), SRP_A (required), SECRET_HASH (required if the app client is configured with a client secret), DEVICE_KEY. For ADMIN_USER_PASSWORD_AUTH: USERNAME (required), PASSWORD (required), SECRET_HASH (required if the app client is configured with a client secret), DEVICE_KEY. For REFRESH_TOKEN_AUTH/REFRESH_TOKEN: REFRESH_TOKEN (required), SECRET_HASH (required if the app client is configured with a client secret), DEVICE_KEY. For CUSTOM_AUTH: USERNAME (required), SECRET_HASH (if app client is configured with client secret), DEVICE_KEY. To start the authentication flow with password verification, include ChallengeName: SRP_A and SRP_A: (The SRP_A Value). For more information about SECRET_HASH, see Computing secret hash values. For information about DEVICE_KEY, see Working with user devices in your user pool. public let authParameters: [String: String]? - /// The app client ID. + /// The ID of the app client where the user wants to sign in. public let clientId: String /// A map of custom key-value pairs that you can provide as input for certain custom workflows that this action triggers. You create custom workflows by assigning Lambda functions to user pool triggers. When you use the AdminInitiateAuth API action, Amazon Cognito invokes the Lambda functions that are specified for various triggers. The ClientMetadata value is passed as input to the functions for only the following triggers: Pre signup Pre authentication User migration When Amazon Cognito invokes the functions for these triggers, it passes a JSON payload, which the function receives as input. This payload contains a validationData attribute, which provides the data that you assigned to the ClientMetadata parameter in your AdminInitiateAuth request. In your function code in Lambda, you can process the validationData value to enhance your workflow for your specific needs. 
When you use the AdminInitiateAuth API action, Amazon Cognito also invokes the functions for the following triggers, but it doesn't provide the ClientMetadata value as input: Post authentication Custom message Pre token generation Create auth challenge Define auth challenge Custom email sender Custom SMS sender For more information, see - /// Customizing user pool Workflows with Lambda Triggers in the Amazon Cognito Developer Guide. When you use the ClientMetadata parameter, remember that Amazon Cognito won't do the following: Store the ClientMetadata value. This data is available only to Lambda triggers that are assigned to a user pool to support custom workflows. If your user pool configuration doesn't include triggers, the ClientMetadata parameter serves no purpose. Validate the ClientMetadata value. Encrypt the ClientMetadata value. Don't use Amazon Cognito to provide sensitive information. + /// Customizing user pool Workflows with Lambda Triggers in the Amazon Cognito Developer Guide. When you use the ClientMetadata parameter, note that Amazon Cognito won't do the following: Store the ClientMetadata value. This data is available only to Lambda triggers that are assigned to a user pool to support custom workflows. If your user pool configuration doesn't include triggers, the ClientMetadata parameter serves no purpose. Validate the ClientMetadata value. Encrypt the ClientMetadata value. Don't send sensitive information in this parameter. public let clientMetadata: [String: String]? /// Contextual data about your user session, such as the device fingerprint, IP address, or location. Amazon Cognito advanced /// security evaluates the risk of an authentication event based on the context that your app generates and passes to Amazon Cognito - /// when it makes API requests. + /// when it makes API requests. For more information, see Collecting data for threat protection in + /// applications. public let contextData: ContextDataType? 
- /// The optional session ID from a ConfirmSignUp API request. You can sign in a user directly from the sign-up process with the USER_AUTH authentication flow. + /// The optional session ID from a ConfirmSignUp API request. You can sign in a user directly from the sign-up process with an AuthFlow of USER_AUTH and AuthParameters of EMAIL_OTP or SMS_OTP, depending on how your user pool sent the confirmation-code message. public let session: String? - /// The ID of the Amazon Cognito user pool. + /// The ID of the user pool where the user wants to sign in. public let userPoolId: String @inlinable @@ -1072,7 +1074,7 @@ extension CognitoIdentityProvider { } public struct AdminInitiateAuthResponse: AWSDecodableShape { - /// The result of the authentication response. This is only returned if the caller doesn't need to pass another challenge. If the caller does need to pass another challenge before it gets tokens, ChallengeName, ChallengeParameters, and Session are returned. + /// The outcome of successful authentication. This is only returned if the user pool has no additional challenges to return. If Amazon Cognito returns another challenge, the response includes ChallengeName, ChallengeParameters, and Session so that your user can answer the challenge. public let authenticationResult: AuthenticationResultType? /// The name of the challenge that you're responding to with this call. This is returned in the AdminInitiateAuth response if you must pass another challenge. WEB_AUTHN: Respond to the challenge with the results of a successful authentication with a passkey, or webauthN, factor. These are typically biometric devices or security keys. PASSWORD: Respond with USER_PASSWORD_AUTH parameters: USERNAME (required), PASSWORD (required), SECRET_HASH (required if the app client is configured with a client secret), DEVICE_KEY. 
PASSWORD_SRP: Respond with USER_SRP_AUTH parameters: USERNAME (required), SRP_A (required), SECRET_HASH (required if the app client is configured with a client secret), DEVICE_KEY. SELECT_CHALLENGE: Respond to the challenge with USERNAME and an ANSWER that matches one of the challenge types in the AvailableChallenges response parameter. MFA_SETUP: If MFA is required, users who don't have at least one of the MFA methods set up are presented with an MFA_SETUP challenge. The user must set up at least one MFA type to continue to authenticate. SELECT_MFA_TYPE: Selects the MFA type. Valid MFA options are SMS_MFA for SMS message MFA, EMAIL_OTP for email message MFA, and SOFTWARE_TOKEN_MFA for time-based one-time password (TOTP) software token MFA. SMS_MFA: Next challenge is to supply an SMS_MFA_CODEthat your user pool delivered in an SMS message. EMAIL_OTP: Next challenge is to supply an EMAIL_OTP_CODE that your user pool delivered in an email message. PASSWORD_VERIFIER: Next challenge is to supply PASSWORD_CLAIM_SIGNATURE, PASSWORD_CLAIM_SECRET_BLOCK, and TIMESTAMP after the client-side SRP calculations. CUSTOM_CHALLENGE: This is returned if your custom authentication flow determines that the user should pass another challenge before tokens are issued. DEVICE_SRP_AUTH: If device tracking was activated in your user pool and the previous challenges were passed, this challenge is returned so that Amazon Cognito can start tracking this device. DEVICE_PASSWORD_VERIFIER: Similar to PASSWORD_VERIFIER, but for devices only. ADMIN_NO_SRP_AUTH: This is returned if you must authenticate with USERNAME and PASSWORD directly. An app client must be enabled to use this flow. NEW_PASSWORD_REQUIRED: For users who are required to change their passwords after successful first login. Respond to this challenge with NEW_PASSWORD and any required attributes that Amazon Cognito returned in the requiredAttributes parameter. 
You can also set values for attributes that aren't required by your user pool and that your app client can write. For more information, see AdminRespondToAuthChallenge. Amazon Cognito only returns this challenge for users who have temporary passwords. Because of this, and because in some cases you can create users who don't have values for required attributes, take care to collect and submit required-attribute values for all users who don't have passwords. You can create a user in the Amazon Cognito console without, for example, a required birthdate attribute. The API response from Amazon Cognito won't prompt you to submit a birthdate for the user if they don't have a password. In a NEW_PASSWORD_REQUIRED challenge response, you can't modify a required attribute that already has a value. /// In AdminRespondToAuthChallenge, set a value for any keys that Amazon Cognito returned in the requiredAttributes parameter, @@ -1080,7 +1082,7 @@ extension CognitoIdentityProvider { public let challengeName: ChallengeNameType? /// The challenge parameters. These are returned to you in the AdminInitiateAuth response if you must pass another challenge. The responses in this parameter should be used to compute inputs to the next call (AdminRespondToAuthChallenge). All challenges require USERNAME and SECRET_HASH (if applicable). The value of the USER_ID_FOR_SRP attribute is the user's actual username, not an alias (such as email address or phone number), even if you specified an alias in your call to AdminInitiateAuth. This happens because, in the AdminRespondToAuthChallenge API ChallengeResponses, the USERNAME attribute can't be an alias. public let challengeParameters: [String: String]? - /// The session that should be passed both ways in challenge-response calls to the service. If AdminInitiateAuth or AdminRespondToAuthChallenge API call determines that the caller must pass another challenge, they return a session with other challenge parameters. 
This session should be passed as it is to the next AdminRespondToAuthChallenge API call. + /// The session that must be passed to challenge-response requests. If an AdminInitiateAuth or AdminRespondToAuthChallenge API request determines that the caller must pass another challenge, Amazon Cognito returns a session ID and the parameters of the next challenge. Pass this session ID in the Session parameter of AdminRespondToAuthChallenge. public let session: String? @inlinable @@ -1104,7 +1106,7 @@ extension CognitoIdentityProvider { public let destinationUser: ProviderUserIdentifierType /// An external IdP account for a user who doesn't exist yet in the user pool. This user must be a federated user (for example, a SAML or Facebook user), not another native user. If the SourceUser is using a federated social IdP, such as Facebook, Google, or Login with Amazon, you must set the ProviderAttributeName to Cognito_Subject. For social IdPs, the ProviderName will be Facebook, Google, or LoginWithAmazon, and Amazon Cognito will automatically parse the Facebook, Google, and Login with Amazon tokens for id, sub, and user_id, respectively. The ProviderAttributeValue for the user must be the same value as the id, sub, or user_id value found in the social IdP token. For OIDC, the ProviderAttributeName can be any mapped value from a claim in the ID token, or that your app retrieves from the userInfo endpoint. For SAML, the ProviderAttributeName can be any mapped value from a claim in the SAML assertion. The following additional considerations apply to SourceUser for OIDC and SAML providers. You must map the claim to a user pool attribute in your IdP configuration, and set the user pool attribute name as the value of ProviderAttributeName in your AdminLinkProviderForUser request. For example, email. When you set ProviderAttributeName to Cognito_Subject, Amazon Cognito will automatically parse the default unique identifier found in the subject from the IdP token. 
public let sourceUser: ProviderUserIdentifierType - /// The user pool ID for the user pool. + /// The ID of the user pool where you want to link a federated identity. public let userPoolId: String @inlinable @@ -1132,7 +1134,7 @@ extension CognitoIdentityProvider { } public struct AdminListDevicesRequest: AWSEncodableShape { - /// The limit of the devices request. + /// The maximum number of devices that you want Amazon Cognito to return in the response. public let limit: Int? /// This API operation returns a limited number of results. The pagination token is /// an identifier that you can present in an additional API request with the same parameters. When @@ -1142,7 +1144,7 @@ extension CognitoIdentityProvider { public let paginationToken: String? /// The username of the user that you want to query or modify. The value of this parameter is typically your user's username, but it can be any of their alias attributes. If username isn't an alias attribute in your user pool, this value must be the sub of a local user or the username of a user from a third-party IdP. public let username: String - /// The user pool ID. + /// The ID of the user pool where the device owner is a user. public let userPoolId: String @inlinable @@ -1175,7 +1177,7 @@ extension CognitoIdentityProvider { } public struct AdminListDevicesResponse: AWSDecodableShape { - /// The devices in the list of devices response. + /// An array of devices and their information. Each entry that's returned includes device information, last-accessed and created dates, and the device key. public let devices: [DeviceType]? /// The identifier that Amazon Cognito returned with the previous request to this operation. When /// you include a pagination token in your request, Amazon Cognito returns the next set of items in @@ -1195,13 +1197,17 @@ extension CognitoIdentityProvider { } public struct AdminListGroupsForUserRequest: AWSEncodableShape { - /// The limit of the request to list groups. 
+ /// The maximum number of groups that you want Amazon Cognito to return in the response. public let limit: Int? - /// An identifier that was returned from the previous call to this operation, which can be used to return the next set of items in the list. + /// This API operation returns a limited number of results. The pagination token is + /// an identifier that you can present in an additional API request with the same parameters. When + /// you include the pagination token, Amazon Cognito returns the next set of items after the current list. + /// Subsequent requests return a new pagination token. By use of this token, you can paginate + /// through the full list of items. public let nextToken: String? /// The username of the user that you want to query or modify. The value of this parameter is typically your user's username, but it can be any of their alias attributes. If username isn't an alias attribute in your user pool, this value must be the sub of a local user or the username of a user from a third-party IdP. public let username: String - /// The user pool ID for the user pool. + /// The ID of the user pool where you want to view a user's groups. public let userPoolId: String @inlinable @@ -1235,9 +1241,11 @@ extension CognitoIdentityProvider { } public struct AdminListGroupsForUserResponse: AWSDecodableShape { - /// The groups that the user belongs to. + /// An array of groups and information about them. public let groups: [GroupType]? - /// An identifier that was returned from the previous call to this operation, which can be used to return the next set of items in the list. + /// The identifier that Amazon Cognito returned with the previous request to this operation. When + /// you include a pagination token in your request, Amazon Cognito returns the next set of items in + /// the list. By use of this token, you can paginate through the full list of items. public let nextToken: String? 
@inlinable @@ -1255,11 +1263,15 @@ extension CognitoIdentityProvider { public struct AdminListUserAuthEventsRequest: AWSEncodableShape { /// The maximum number of authentication events to return. Returns 60 events if you set MaxResults to 0, or if you don't include a MaxResults parameter. public let maxResults: Int? - /// A pagination token. + /// This API operation returns a limited number of results. The pagination token is + /// an identifier that you can present in an additional API request with the same parameters. When + /// you include the pagination token, Amazon Cognito returns the next set of items after the current list. + /// Subsequent requests return a new pagination token. By use of this token, you can paginate + /// through the full list of items. public let nextToken: String? /// The username of the user that you want to query or modify. The value of this parameter is typically your user's username, but it can be any of their alias attributes. If username isn't an alias attribute in your user pool, this value must be the sub of a local user or the username of a user from a third-party IdP. public let username: String - /// The user pool ID. + /// The ID of the user pool that contains the user profile with the logged events. public let userPoolId: String @inlinable @@ -1295,7 +1307,9 @@ extension CognitoIdentityProvider { public struct AdminListUserAuthEventsResponse: AWSDecodableShape { /// The response object. It includes the EventID, EventType, CreationDate, EventRisk, and EventResponse. public let authEvents: [AuthEventType]? - /// A pagination token. + /// The identifier that Amazon Cognito returned with the previous request to this operation. When + /// you include a pagination token in your request, Amazon Cognito returns the next set of items in + /// the list. By use of this token, you can paginate through the full list of items. public let nextToken: String? 
@inlinable @@ -1311,11 +1325,11 @@ extension CognitoIdentityProvider { } public struct AdminRemoveUserFromGroupRequest: AWSEncodableShape { - /// The group name. + /// The name of the group that you want to remove the user from, for example MyTestGroup. public let groupName: String /// The username of the user that you want to query or modify. The value of this parameter is typically your user's username, but it can be any of their alias attributes. If username isn't an alias attribute in your user pool, this value must be the sub of a local user or the username of a user from a third-party IdP. public let username: String - /// The user pool ID for the user pool. + /// The ID of the user pool that contains the group and the user that you want to remove. public let userPoolId: String @inlinable @@ -1345,12 +1359,12 @@ extension CognitoIdentityProvider { } public struct AdminResetUserPasswordRequest: AWSEncodableShape { - /// A map of custom key-value pairs that you can provide as input for any custom workflows that this action triggers. You create custom workflows by assigning Lambda functions to user pool triggers. When you use the AdminResetUserPassword API action, Amazon Cognito invokes the function that is assigned to the custom message trigger. When Amazon Cognito invokes this function, it passes a JSON payload, which the function receives as input. This payload contains a clientMetadata attribute, which provides the data that you assigned to the ClientMetadata parameter in your AdminResetUserPassword request. In your function code in Lambda, you can process the clientMetadata value to enhance your workflow for your specific needs. For more information, see - /// Customizing user pool Workflows with Lambda Triggers in the Amazon Cognito Developer Guide. When you use the ClientMetadata parameter, remember that Amazon Cognito won't do the following: Store the ClientMetadata value. 
This data is available only to Lambda triggers that are assigned to a user pool to support custom workflows. If your user pool configuration doesn't include triggers, the ClientMetadata parameter serves no purpose. Validate the ClientMetadata value. Encrypt the ClientMetadata value. Don't use Amazon Cognito to provide sensitive information. + /// A map of custom key-value pairs that you can provide as input for any custom workflows that this action triggers. You create custom workflows by assigning Lambda functions to user pool triggers. The AdminResetUserPassword API operation invokes the function that is assigned to the custom message trigger. When Amazon Cognito invokes this function, it passes a JSON payload, which the function receives as input. This payload contains a clientMetadata attribute, which provides the data that you assigned to the ClientMetadata parameter in your AdminResetUserPassword request. In your function code in Lambda, you can process the clientMetadata value to enhance your workflow for your specific needs. For more information, see + /// Customizing user pool Workflows with Lambda Triggers in the Amazon Cognito Developer Guide. When you use the ClientMetadata parameter, note that Amazon Cognito won't do the following: Store the ClientMetadata value. This data is available only to Lambda triggers that are assigned to a user pool to support custom workflows. If your user pool configuration doesn't include triggers, the ClientMetadata parameter serves no purpose. Validate the ClientMetadata value. Encrypt the ClientMetadata value. Don't send sensitive information in this parameter. public let clientMetadata: [String: String]? /// The username of the user that you want to query or modify. The value of this parameter is typically your user's username, but it can be any of their alias attributes. If username isn't an alias attribute in your user pool, this value must be the sub of a local user or the username of a user from a third-party IdP. 
public let username: String - /// The user pool ID for the user pool where you want to reset the user's password. + /// The ID of the user pool where you want to reset the user's password. public let userPoolId: String @inlinable @@ -1387,24 +1401,25 @@ extension CognitoIdentityProvider { public struct AdminRespondToAuthChallengeRequest: AWSEncodableShape { /// The analytics metadata for collecting Amazon Pinpoint metrics for AdminRespondToAuthChallenge calls. public let analyticsMetadata: AnalyticsMetadataType? - /// The challenge name. For more information, see AdminInitiateAuth. + /// The name of the challenge that you are responding to. You can find more information about values for ChallengeName in the response parameters of AdminInitiateAuth. public let challengeName: ChallengeNameType /// The responses to the challenge that you received in the previous request. Each challenge has its own required response parameters. The following examples are partial JSON request bodies that highlight challenge-response parameters. You must provide a SECRET_HASH parameter in all challenge responses to an app client that has a client secret. Include a DEVICE_KEY for device authentication. SELECT_CHALLENGE "ChallengeName": "SELECT_CHALLENGE", "ChallengeResponses": { "USERNAME": "[username]", "ANSWER": "[Challenge name]"} Available challenges are PASSWORD, PASSWORD_SRP, EMAIL_OTP, SMS_OTP, and WEB_AUTHN. Complete authentication in the SELECT_CHALLENGE response for PASSWORD, PASSWORD_SRP, and WEB_AUTHN: "ChallengeName": "SELECT_CHALLENGE", "ChallengeResponses": { "ANSWER": "WEB_AUTHN", "USERNAME": "[username]", "CREDENTIAL": "[AuthenticationResponseJSON]"} See AuthenticationResponseJSON. 
"ChallengeName": "SELECT_CHALLENGE", "ChallengeResponses": { "ANSWER": "PASSWORD", "USERNAME": "[username]", "PASSWORD": "[password]"} "ChallengeName": "SELECT_CHALLENGE", "ChallengeResponses": { "ANSWER": "PASSWORD_SRP", "USERNAME": "[username]", "SRP_A": "[SRP_A]"} For SMS_OTP and EMAIL_OTP, respond with the username and answer. Your user pool will send a code for the user to submit in the next challenge response. "ChallengeName": "SELECT_CHALLENGE", "ChallengeResponses": { "ANSWER": "SMS_OTP", "USERNAME": "[username]"} "ChallengeName": "SELECT_CHALLENGE", "ChallengeResponses": { "ANSWER": "EMAIL_OTP", "USERNAME": "[username]"} SMS_OTP "ChallengeName": "SMS_OTP", "ChallengeResponses": {"SMS_OTP_CODE": "[code]", "USERNAME": "[username]"} EMAIL_OTP "ChallengeName": "EMAIL_OTP", "ChallengeResponses": {"EMAIL_OTP_CODE": "[code]", "USERNAME": "[username]"} SMS_MFA "ChallengeName": "SMS_MFA", "ChallengeResponses": {"SMS_MFA_CODE": "[code]", "USERNAME": "[username]"} PASSWORD_VERIFIER This challenge response is part of the SRP flow. Amazon Cognito requires that your application respond to this challenge within a few seconds. When the response time exceeds this period, your user pool returns a NotAuthorizedException error. "ChallengeName": "PASSWORD_VERIFIER", "ChallengeResponses": {"PASSWORD_CLAIM_SIGNATURE": "[claim_signature]", "PASSWORD_CLAIM_SECRET_BLOCK": "[secret_block]", "TIMESTAMP": [timestamp], "USERNAME": "[username]"} Add "DEVICE_KEY" when you sign in with a remembered device. CUSTOM_CHALLENGE "ChallengeName": "CUSTOM_CHALLENGE", "ChallengeResponses": {"USERNAME": "[username]", "ANSWER": "[challenge_answer]"} Add "DEVICE_KEY" when you sign in with a remembered device. 
NEW_PASSWORD_REQUIRED "ChallengeName": "NEW_PASSWORD_REQUIRED", "ChallengeResponses": {"NEW_PASSWORD": "[new_password]", "USERNAME": "[username]"} To set any required attributes that InitiateAuth returned in an requiredAttributes parameter, add "userAttributes.[attribute_name]": "[attribute_value]". This parameter can also set values for writable attributes that aren't required by your user pool. In a NEW_PASSWORD_REQUIRED challenge response, you can't modify a required attribute that already has a value. /// In RespondToAuthChallenge, set a value for any keys that Amazon Cognito returned in the requiredAttributes parameter, /// then use the UpdateUserAttributes API operation to modify the value of any additional attributes. SOFTWARE_TOKEN_MFA "ChallengeName": "SOFTWARE_TOKEN_MFA", "ChallengeResponses": {"USERNAME": "[username]", "SOFTWARE_TOKEN_MFA_CODE": [authenticator_code]} DEVICE_SRP_AUTH "ChallengeName": "DEVICE_SRP_AUTH", "ChallengeResponses": {"USERNAME": "[username]", "DEVICE_KEY": "[device_key]", "SRP_A": "[srp_a]"} DEVICE_PASSWORD_VERIFIER "ChallengeName": "DEVICE_PASSWORD_VERIFIER", "ChallengeResponses": {"DEVICE_KEY": "[device_key]", "PASSWORD_CLAIM_SIGNATURE": "[claim_signature]", "PASSWORD_CLAIM_SECRET_BLOCK": "[secret_block]", "TIMESTAMP": [timestamp], "USERNAME": "[username]"} MFA_SETUP "ChallengeName": "MFA_SETUP", "ChallengeResponses": {"USERNAME": "[username]"}, "SESSION": "[Session ID from VerifySoftwareToken]" SELECT_MFA_TYPE "ChallengeName": "SELECT_MFA_TYPE", "ChallengeResponses": {"USERNAME": "[username]", "ANSWER": "[SMS_MFA or SOFTWARE_TOKEN_MFA]"} For more information about SECRET_HASH, see Computing secret hash values. For information about DEVICE_KEY, see Working with user devices in your user pool. public let challengeResponses: [String: String]? - /// The app client ID. + /// The ID of the app client where you initiated sign-in. 
public let clientId: String - /// A map of custom key-value pairs that you can provide as input for any custom workflows that this action triggers. You create custom workflows by assigning Lambda functions to user pool triggers. When you use the AdminRespondToAuthChallenge API action, Amazon Cognito invokes any functions that you have assigned to the following triggers: pre sign-up custom message post authentication user migration pre token generation define auth challenge create auth challenge verify auth challenge response When Amazon Cognito invokes any of these functions, it passes a JSON payload, which the function receives as input. This payload contains a clientMetadata attribute that provides the data that you assigned to the ClientMetadata parameter in your AdminRespondToAuthChallenge request. In your function code in Lambda, you can process the clientMetadata value to enhance your workflow for your specific needs. For more information, see - /// Customizing user pool Workflows with Lambda Triggers in the Amazon Cognito Developer Guide. When you use the ClientMetadata parameter, remember that Amazon Cognito won't do the following: Store the ClientMetadata value. This data is available only to Lambda triggers that are assigned to a user pool to support custom workflows. If your user pool configuration doesn't include triggers, the ClientMetadata parameter serves no purpose. Validate the ClientMetadata value. Encrypt the ClientMetadata value. Don't use Amazon Cognito to provide sensitive information. + /// A map of custom key-value pairs that you can provide as input for any custom workflows that this action triggers. You create custom workflows by assigning Lambda functions to user pool triggers. 
When you use the AdminRespondToAuthChallenge API action, Amazon Cognito invokes any functions that you have assigned to the following triggers: Pre sign-up custom message Post authentication User migration Pre token generation Define auth challenge Create auth challenge Verify auth challenge response When Amazon Cognito invokes any of these functions, it passes a JSON payload, which the function receives as input. This payload contains a clientMetadata attribute that provides the data that you assigned to the ClientMetadata parameter in your AdminRespondToAuthChallenge request. In your function code in Lambda, you can process the clientMetadata value to enhance your workflow for your specific needs. For more information, see + /// Customizing user pool Workflows with Lambda Triggers in the Amazon Cognito Developer Guide. When you use the ClientMetadata parameter, note that Amazon Cognito won't do the following: Store the ClientMetadata value. This data is available only to Lambda triggers that are assigned to a user pool to support custom workflows. If your user pool configuration doesn't include triggers, the ClientMetadata parameter serves no purpose. Validate the ClientMetadata value. Encrypt the ClientMetadata value. Don't send sensitive information in this parameter. public let clientMetadata: [String: String]? /// Contextual data about your user session, such as the device fingerprint, IP address, or location. Amazon Cognito advanced /// security evaluates the risk of an authentication event based on the context that your app generates and passes to Amazon Cognito - /// when it makes API requests. + /// when it makes API requests. For more information, see Collecting data for threat protection in + /// applications. public let contextData: ContextDataType? - /// The session that should be passed both ways in challenge-response calls to the service. 
If an InitiateAuth or RespondToAuthChallenge API call determines that the caller must pass another challenge, it returns a session with other challenge parameters. This session should be passed as it is to the next RespondToAuthChallenge API call. + /// The session identifier that maintains the state of authentication requests and challenge responses. If an AdminInitiateAuth or AdminRespondToAuthChallenge API request results in a determination that your application must pass another challenge, Amazon Cognito returns a session with other challenge parameters. Send this session identifier, unmodified, to the next AdminRespondToAuthChallenge request. public let session: String? - /// The ID of the Amazon Cognito user pool. + /// The ID of the user pool where you want to respond to an authentication challenge. public let userPoolId: String @inlinable @@ -1453,13 +1468,13 @@ extension CognitoIdentityProvider { } public struct AdminRespondToAuthChallengeResponse: AWSDecodableShape { - /// The result returned by the server in response to the authentication request. + /// The outcome of a successful authentication process. After your application has passed all challenges, Amazon Cognito returns an AuthenticationResult with the JSON web tokens (JWTs) that indicate successful sign-in. public let authenticationResult: AuthenticationResultType? - /// The name of the challenge. For more information, see AdminInitiateAuth. + /// The name of the challenge that you must next respond to. You can find more information about values for ChallengeName in the response parameters of AdminInitiateAuth. public let challengeName: ChallengeNameType? - /// The challenge parameters. For more information, see AdminInitiateAuth. + /// The parameters that define your response to the next challenge. Take the values in ChallengeParameters and provide values for them in the ChallengeResponses of the next AdminRespondToAuthChallenge request. public let challengeParameters: [String: String]? 
- /// The session that should be passed both ways in challenge-response calls to the service. If the caller must pass another challenge, they return a session with other challenge parameters. This session should be passed as it is to the next RespondToAuthChallenge API call. + /// The session identifier that maintains the state of authentication requests and challenge responses. If an AdminInitiateAuth or AdminRespondToAuthChallenge API request results in a determination that your application must pass another challenge, Amazon Cognito returns a session with other challenge parameters. Send this session identifier, unmodified, to the next AdminRespondToAuthChallenge request. public let session: String? @inlinable @@ -1522,13 +1537,13 @@ extension CognitoIdentityProvider { } public struct AdminSetUserPasswordRequest: AWSEncodableShape { - /// The password for the user. + /// The new temporary or permanent password that you want to set for the user. You can't remove the password for a user who already has a password so that they can only sign in with passwordless methods. In this scenario, you must create a new user without a password. public let password: String - /// True if the password is permanent, False if it is temporary. + /// Set to true to set a password that the user can immediately sign in with. Set to false to set a temporary password that the user must change on their next sign-in. public let permanent: Bool? /// The username of the user that you want to query or modify. The value of this parameter is typically your user's username, but it can be any of their alias attributes. If username isn't an alias attribute in your user pool, this value must be the sub of a local user or the username of a user from a third-party IdP. public let username: String - /// The user pool ID for the user pool where you want to set the user's password. + /// The ID of the user pool where you want to set the user's password. 
public let userPoolId: String @inlinable @@ -1601,7 +1616,7 @@ extension CognitoIdentityProvider { } public struct AdminUpdateAuthEventFeedbackRequest: AWSEncodableShape { - /// The authentication event ID. + /// The authentication event ID. To query authentication events for a user, see AdminListUserAuthEvents. public let eventId: String /// The authentication event feedback value. When you provide a FeedbackValue /// value of valid, you tell Amazon Cognito that you trust a user session where Amazon Cognito @@ -1611,7 +1626,7 @@ extension CognitoIdentityProvider { public let feedbackValue: FeedbackValueType /// The username of the user that you want to query or modify. The value of this parameter is typically your user's username, but it can be any of their alias attributes. If username isn't an alias attribute in your user pool, this value must be the sub of a local user or the username of a user from a third-party IdP. - /// The user pool ID. + /// The ID of the user pool where you want to submit authentication-event feedback. public let userPoolId: String @inlinable @@ -1647,13 +1662,13 @@ extension CognitoIdentityProvider { } public struct AdminUpdateDeviceStatusRequest: AWSEncodableShape { - /// The device key. + /// The unique identifier, or device key, of the device that you want to update the status for. public let deviceKey: String - /// The status indicating whether a device has been remembered or not. + /// To enable device authentication with the specified device, set to remembered. To disable, set to not_remembered. public let deviceRememberedStatus: DeviceRememberedStatusType? /// The username of the user that you want to query or modify. The value of this parameter is typically your user's username, but it can be any of their alias attributes. If username isn't an alias attribute in your user pool, this value must be the sub of a local user or the username of a user from a third-party IdP.
public let username: String - /// The user pool ID. + /// The ID of the user pool where you want to change a user's device status. public let userPoolId: String @inlinable @@ -1690,13 +1705,13 @@ extension CognitoIdentityProvider { public struct AdminUpdateUserAttributesRequest: AWSEncodableShape { /// A map of custom key-value pairs that you can provide as input for any custom workflows that this action triggers. You create custom workflows by assigning Lambda functions to user pool triggers. When you use the AdminUpdateUserAttributes API action, Amazon Cognito invokes the function that is assigned to the custom message trigger. When Amazon Cognito invokes this function, it passes a JSON payload, which the function receives as input. This payload contains a clientMetadata attribute, which provides the data that you assigned to the ClientMetadata parameter in your AdminUpdateUserAttributes request. In your function code in Lambda, you can process the clientMetadata value to enhance your workflow for your specific needs. For more information, see - /// Customizing user pool Workflows with Lambda Triggers in the Amazon Cognito Developer Guide. When you use the ClientMetadata parameter, remember that Amazon Cognito won't do the following: Store the ClientMetadata value. This data is available only to Lambda triggers that are assigned to a user pool to support custom workflows. If your user pool configuration doesn't include triggers, the ClientMetadata parameter serves no purpose. Validate the ClientMetadata value. Encrypt the ClientMetadata value. Don't use Amazon Cognito to provide sensitive information. + /// Customizing user pool Workflows with Lambda Triggers in the Amazon Cognito Developer Guide. When you use the ClientMetadata parameter, note that Amazon Cognito won't do the following: Store the ClientMetadata value. This data is available only to Lambda triggers that are assigned to a user pool to support custom workflows. 
If your user pool configuration doesn't include triggers, the ClientMetadata parameter serves no purpose. Validate the ClientMetadata value. Encrypt the ClientMetadata value. Don't send sensitive information in this parameter. public let clientMetadata: [String: String]? /// An array of name-value pairs representing user attributes. For custom attributes, you must prepend the custom: prefix to the attribute name. If your user pool requires verification before Amazon Cognito updates an attribute value that you specify in this request, Amazon Cognito doesn’t immediately update the value of that attribute. After your user receives and responds to a verification message to verify the new value, Amazon Cognito updates the attribute value. Your user can sign in and receive messages with the original attribute value until they verify the new value. To skip the verification message and update the value of an attribute that requires verification in the same API request, include the email_verified or phone_number_verified attribute, with a value of true. If you set the email_verified or phone_number_verified value for an email or phone_number attribute that requires verification to true, Amazon Cognito doesn’t send a verification message to your user. public let userAttributes: [AttributeType] /// The username of the user that you want to query or modify. The value of this parameter is typically your user's username, but it can be any of their alias attributes. If username isn't an alias attribute in your user pool, this value must be the sub of a local user or the username of a user from a third-party IdP. public let username: String - /// The user pool ID for the user pool where you want to update user attributes. + /// The ID of the user pool where you want to update user attributes. 
public let userPoolId: String @inlinable @@ -1738,7 +1753,7 @@ extension CognitoIdentityProvider { public struct AdminUserGlobalSignOutRequest: AWSEncodableShape { /// The username of the user that you want to query or modify. The value of this parameter is typically your user's username, but it can be any of their alias attributes. If username isn't an alias attribute in your user pool, this value must be the sub of a local user or the username of a user from a third-party IdP. public let username: String - /// The user pool ID. + /// The ID of the user pool where you want to sign out a user. public let userPoolId: String @inlinable @@ -1877,9 +1892,9 @@ extension CognitoIdentityProvider { } public struct AssociateSoftwareTokenRequest: AWSEncodableShape { - /// A valid access token that Amazon Cognito issued to the user whose software token you want to generate. + /// A valid access token that Amazon Cognito issued to the user whose software token you want to generate. You can provide either an access token or a session ID in the request. public let accessToken: String? - /// The session that should be passed both ways in challenge-response calls to the service. This allows authentication of the user as part of the MFA setup process. + /// The session identifier that maintains the state of authentication requests and challenge responses. In AssociateSoftwareToken, this is the session ID from a successful sign-in. You can provide either an access token or a session ID in the request. public let session: String? @inlinable @@ -1901,9 +1916,9 @@ extension CognitoIdentityProvider { } public struct AssociateSoftwareTokenResponse: AWSDecodableShape { - /// A unique generated shared secret code that is used in the TOTP algorithm to generate a one-time code. + /// A unique generated shared secret code that is used by the TOTP algorithm to generate a one-time code. public let secretCode: String? 
- /// The session that should be passed both ways in challenge-response calls to the service. This allows authentication of the user as part of the MFA setup process. + /// The session identifier that maintains the state of authentication requests and challenge responses. This session ID is valid for the next request in this flow, VerifySoftwareToken. public let session: String? @inlinable @@ -2043,7 +2058,7 @@ extension CognitoIdentityProvider { public let accessToken: String /// The user's previous password. Required if the user has a password. If the user has no password and only signs in with passwordless authentication options, you can omit this parameter. public let previousPassword: String? - /// The new password. + /// A new password that you prompted the user to enter in your application. public let proposedPassword: String @inlinable @@ -2115,7 +2130,7 @@ extension CognitoIdentityProvider { } public struct CompleteWebAuthnRegistrationRequest: AWSEncodableShape { - /// A valid access token that Amazon Cognito issued to the user whose passkey registration you want to verify. + /// A valid access token that Amazon Cognito issued to the user whose passkey registration you want to complete. public let accessToken: String /// A RegistrationResponseJSON public-key credential response from the user's passkey provider. public let credential: String @@ -2175,9 +2190,9 @@ extension CognitoIdentityProvider { public struct ConfirmDeviceRequest: AWSEncodableShape { /// A valid access token that Amazon Cognito issued to the user whose device you want to confirm. public let accessToken: String - /// The device key. + /// The unique identifier, or device key, of the device that you want to update the status for. public let deviceKey: String - /// The device name. + /// A friendly name for the device, for example MyMobilePhone. public let deviceName: String? /// The configuration of the device secret verifier. 
public let deviceSecretVerifierConfig: DeviceSecretVerifierConfigType? @@ -2209,7 +2224,7 @@ extension CognitoIdentityProvider { } public struct ConfirmDeviceResponse: AWSDecodableShape { - /// Indicates whether the user confirmation must confirm the device response. + /// When true, your user must confirm that they want to remember the device. Prompt the user for an answer. You must then make an UpdateUserDevice request that sets the device to remembered or not_remembered. When false, immediately sets the device as remembered and eligible for device authentication. You can configure your user pool to always remember devices, in which case this response is false, or to allow users to opt in, in which case this response is true. Configure this option under Device tracking in the Sign-in menu of your user pool. You can also configure this option with the DeviceConfiguration parameter of a CreateUserPool or UpdateUserPool request. public let userConfirmationNecessary: Bool? @inlinable @@ -2225,12 +2240,12 @@ extension CognitoIdentityProvider { public struct ConfirmForgotPasswordRequest: AWSEncodableShape { /// The Amazon Pinpoint analytics metadata for collecting metrics for ConfirmForgotPassword calls. public let analyticsMetadata: AnalyticsMetadataType? - /// The app client ID of the app associated with the user pool. + /// The ID of the app client where the user wants to reset their password. This parameter is an identifier of the client application that users are resetting their password from, but this operation resets users' passwords for all app clients in the user pool. public let clientId: String /// A map of custom key-value pairs that you can provide as input for any custom workflows that this action triggers. You create custom workflows by assigning Lambda functions to user pool triggers. When you use the ConfirmForgotPassword API action, Amazon Cognito invokes the function that is assigned to the post confirmation trigger. 
When Amazon Cognito invokes this function, it passes a JSON payload, which the function receives as input. This payload contains a clientMetadata attribute, which provides the data that you assigned to the ClientMetadata parameter in your ConfirmForgotPassword request. In your function code in Lambda, you can process the clientMetadata value to enhance your workflow for your specific needs. For more information, see - /// Customizing user pool Workflows with Lambda Triggers in the Amazon Cognito Developer Guide. When you use the ClientMetadata parameter, remember that Amazon Cognito won't do the following: Store the ClientMetadata value. This data is available only to Lambda triggers that are assigned to a user pool to support custom workflows. If your user pool configuration doesn't include triggers, the ClientMetadata parameter serves no purpose. Validate the ClientMetadata value. Encrypt the ClientMetadata value. Don't use Amazon Cognito to provide sensitive information. + /// Customizing user pool Workflows with Lambda Triggers in the Amazon Cognito Developer Guide. When you use the ClientMetadata parameter, note that Amazon Cognito won't do the following: Store the ClientMetadata value. This data is available only to Lambda triggers that are assigned to a user pool to support custom workflows. If your user pool configuration doesn't include triggers, the ClientMetadata parameter serves no purpose. Validate the ClientMetadata value. Encrypt the ClientMetadata value. Don't send sensitive information in this parameter. public let clientMetadata: [String: String]? - /// The confirmation code from your user's request to reset their password. For more information, see ForgotPassword. + /// The confirmation code that your user pool sent in response to an AdminResetUserPassword or a ForgotPassword request. public let confirmationCode: String /// The new password that your user wants to set. 
public let password: String @@ -2238,7 +2253,8 @@ extension CognitoIdentityProvider { public let secretHash: String? /// Contextual data about your user session, such as the device fingerprint, IP address, or location. Amazon Cognito advanced /// security evaluates the risk of an authentication event based on the context that your app generates and passes to Amazon Cognito - /// when it makes API requests. + /// when it makes API requests. For more information, see Collecting data for threat protection in + /// applications. public let userContextData: UserContextDataType? /// The username of the user that you want to query or modify. The value of this parameter is typically your user's username, but it can be any of their alias attributes. If username isn't an alias attribute in your user pool, this value must be the sub of a local user or the username of a user from a third-party IdP. public let username: String @@ -2300,19 +2316,20 @@ extension CognitoIdentityProvider { /// The ID of the app client associated with the user pool. public let clientId: String /// A map of custom key-value pairs that you can provide as input for any custom workflows that this action triggers. You create custom workflows by assigning Lambda functions to user pool triggers. When you use the ConfirmSignUp API action, Amazon Cognito invokes the function that is assigned to the post confirmation trigger. When Amazon Cognito invokes this function, it passes a JSON payload, which the function receives as input. This payload contains a clientMetadata attribute, which provides the data that you assigned to the ClientMetadata parameter in your ConfirmSignUp request. In your function code in Lambda, you can process the clientMetadata value to enhance your workflow for your specific needs. For more information, see - /// Customizing user pool Workflows with Lambda Triggers in the Amazon Cognito Developer Guide. 
When you use the ClientMetadata parameter, remember that Amazon Cognito won't do the following: Store the ClientMetadata value. This data is available only to Lambda triggers that are assigned to a user pool to support custom workflows. If your user pool configuration doesn't include triggers, the ClientMetadata parameter serves no purpose. Validate the ClientMetadata value. Encrypt the ClientMetadata value. Don't use Amazon Cognito to provide sensitive information. + /// Customizing user pool Workflows with Lambda Triggers in the Amazon Cognito Developer Guide. When you use the ClientMetadata parameter, note that Amazon Cognito won't do the following: Store the ClientMetadata value. This data is available only to Lambda triggers that are assigned to a user pool to support custom workflows. If your user pool configuration doesn't include triggers, the ClientMetadata parameter serves no purpose. Validate the ClientMetadata value. Encrypt the ClientMetadata value. Don't send sensitive information in this parameter. public let clientMetadata: [String: String]? - /// The confirmation code sent by a user's request to confirm registration. + /// The confirmation code that your user pool sent in response to the SignUp request. public let confirmationCode: String - /// Boolean to be specified to force user confirmation irrespective of existing alias. By default set to False. If this parameter is set to True and the phone number/email used for sign up confirmation already exists as an alias with a different user, the API call will migrate the alias from the previous user to the newly created user being confirmed. If set to False, the API will throw an AliasExistsException error. + /// When true, forces user confirmation despite any existing aliases. Defaults to false. A value of true migrates the alias from an existing user to the new user if an existing user already has the phone number or email address as an alias. 
Say, for example, that an existing user has an email attribute of bob@example.com and email is an alias in your user pool. If the new user also has an email of bob@example.com and your ConfirmSignUp response sets ForceAliasCreation to true, the new user can sign in with a username of bob@example.com and the existing user can no longer do so. If false and an attribute belongs to an existing alias, this request returns an AliasExistsException error. For more information about sign-in aliases, see Customizing sign-in attributes. public let forceAliasCreation: Bool? - /// A keyed-hash message authentication code (HMAC) calculated using the secret key of a user pool client and username plus the client ID in the message. + /// A keyed-hash message authentication code (HMAC) calculated using the secret key of a user pool client and username plus the client ID in the message. For more information about SecretHash, see Computing secret hash values. public let secretHash: String? /// The optional session ID from a SignUp API request. You can sign in a user directly from the sign-up process with the USER_AUTH authentication flow. public let session: String? /// Contextual data about your user session, such as the device fingerprint, IP address, or location. Amazon Cognito advanced /// security evaluates the risk of an authentication event based on the context that your app generates and passes to Amazon Cognito - /// when it makes API requests. + /// when it makes API requests. For more information, see Collecting data for threat protection in + /// applications. public let userContextData: UserContextDataType? /// The username of the user that you want to query or modify. The value of this parameter is typically your user's username, but it can be any of their alias attributes. If username isn't an alias attribute in your user pool, this value must be the sub of a local user or the username of a user from a third-party IdP. 
public let username: String @@ -2367,7 +2384,7 @@ extension CognitoIdentityProvider { } public struct ConfirmSignUpResponse: AWSDecodableShape { - /// You can automatically sign users in with the one-time password that they provided in a successful ConfirmSignUp request. To do this, pass the Session parameter from the ConfirmSignUp response in the Session parameter of an InitiateAuth or AdminInitiateAuth request. + /// A session identifier that you can use to immediately sign in the confirmed user. You can automatically sign users in with the one-time password that they provided in a successful ConfirmSignUp request. To do this, pass the Session parameter from this response in the Session parameter of an InitiateAuth or AdminInitiateAuth request. public let session: String? @inlinable @@ -2421,15 +2438,15 @@ extension CognitoIdentityProvider { } public struct CreateGroupRequest: AWSEncodableShape { - /// A string containing the description of the group. + /// A description of the group that you're creating. public let description: String? - /// The name of the group. Must be unique. + /// A name for the group. This name must be unique in your user pool. public let groupName: String /// A non-negative integer value that specifies the precedence of this group relative to the other groups that a user can belong to in the user pool. Zero is the highest precedence value. Groups with lower Precedence values take precedence over groups with higher or null Precedence values. If a user belongs to two or more groups, it is the group with the lowest precedence value whose role ARN is given in the user's tokens for the cognito:roles and cognito:preferred_role claims. Two groups can have the same Precedence value. If this happens, neither group takes precedence over the other. If two groups with the same Precedence have the same role ARN, that role is used in the cognito:preferred_role claim in tokens for users in each group. 
If the two groups have different role ARNs, the cognito:preferred_role claim isn't set in users' tokens. The default Precedence value is null. The maximum Precedence value is 2^31-1. public let precedence: Int? - /// The role Amazon Resource Name (ARN) for the group. + /// The Amazon Resource Name (ARN) for the IAM role that you want to associate with the group. A group role primarily declares a preferred role for the credentials that you get from an identity pool. Amazon Cognito ID tokens have a cognito:preferred_role claim that presents the highest-precedence group that a user belongs to. Both ID and access tokens also contain a cognito:groups claim that lists all the groups that a user is a member of. public let roleArn: String? - /// The user pool ID for the user pool. + /// The ID of the user pool where you want to create a user group. public let userPoolId: String @inlinable @@ -2465,7 +2482,7 @@ extension CognitoIdentityProvider { } public struct CreateGroupResponse: AWSDecodableShape { - /// The group object for the group. + /// The response object for a created group. public let group: GroupType? @inlinable @@ -2479,20 +2496,20 @@ extension CognitoIdentityProvider { } public struct CreateIdentityProviderRequest: AWSEncodableShape { - /// A mapping of IdP attributes to standard and custom user pool attributes. + /// A mapping of IdP attributes to standard and custom user pool attributes. Specify a user pool attribute as the key of the key-value pair, and the IdP attribute claim name as the value. public let attributeMapping: [String: String]? - /// A list of IdP identifiers. + /// An array of IdP identifiers, for example "IdPIdentifiers": [ "MyIdP", "MyIdP2" ]. Identifiers are friendly names that you can pass in the idp_identifier query parameter of requests to the Authorize endpoint to silently redirect to sign-in with the associated IdP. Identifiers in a domain format also enable the use of email-address matching with SAML providers.
public let idpIdentifiers: [String]? /// The scopes, URLs, and identifiers for your external identity provider. The following /// examples describe the provider detail keys for each IdP type. These values and their /// schema are subject to change. Social IdP authorize_scopes values must match /// the values listed here. OpenID Connect (OIDC) Amazon Cognito accepts the following elements when it can't discover endpoint URLs from oidc_issuer: attributes_url, authorize_url, jwks_uri, token_url. Create or update request: "ProviderDetails": { "attributes_request_method": "GET", "attributes_url": "https://auth.example.com/userInfo", "authorize_scopes": "openid profile email", "authorize_url": "https://auth.example.com/authorize", "client_id": "1example23456789", "client_secret": "provider-app-client-secret", "jwks_uri": "https://auth.example.com/.well-known/jwks.json", "oidc_issuer": "https://auth.example.com", "token_url": "https://example.com/token" } Describe response: "ProviderDetails": { "attributes_request_method": "GET", "attributes_url": "https://auth.example.com/userInfo", "attributes_url_add_attributes": "false", "authorize_scopes": "openid profile email", "authorize_url": "https://auth.example.com/authorize", "client_id": "1example23456789", "client_secret": "provider-app-client-secret", "jwks_uri": "https://auth.example.com/.well-known/jwks.json", "oidc_issuer": "https://auth.example.com", "token_url": "https://example.com/token" } SAML Create or update request with Metadata URL: "ProviderDetails": { "IDPInit": "true", "IDPSignout": "true", "EncryptedResponses" : "true", "MetadataURL": "https://auth.example.com/sso/saml/metadata", "RequestSigningAlgorithm": "rsa-sha256" } Create or update request with Metadata file: "ProviderDetails": { "IDPInit": "true", "IDPSignout": "true", "EncryptedResponses" : "true", "MetadataFile": "[metadata XML]", "RequestSigningAlgorithm": "rsa-sha256" } The value of MetadataFile must be the plaintext metadata document with all 
quote (") characters escaped by backslashes. Describe response: "ProviderDetails": { "IDPInit": "true", "IDPSignout": "true", "EncryptedResponses" : "true", "ActiveEncryptionCertificate": "[certificate]", "MetadataURL": "https://auth.example.com/sso/saml/metadata", "RequestSigningAlgorithm": "rsa-sha256", "SLORedirectBindingURI": "https://auth.example.com/slo/saml", "SSORedirectBindingURI": "https://auth.example.com/sso/saml" } LoginWithAmazon Create or update request: "ProviderDetails": { "authorize_scopes": "profile postal_code", "client_id": "amzn1.application-oa2-client.1example23456789", "client_secret": "provider-app-client-secret" Describe response: "ProviderDetails": { "attributes_url": "https://api.amazon.com/user/profile", "attributes_url_add_attributes": "false", "authorize_scopes": "profile postal_code", "authorize_url": "https://www.amazon.com/ap/oa", "client_id": "amzn1.application-oa2-client.1example23456789", "client_secret": "provider-app-client-secret", "token_request_method": "POST", "token_url": "https://api.amazon.com/auth/o2/token" } Google Create or update request: "ProviderDetails": { "authorize_scopes": "email profile openid", "client_id": "1example23456789.apps.googleusercontent.com", "client_secret": "provider-app-client-secret" } Describe response: "ProviderDetails": { "attributes_url": "https://people.googleapis.com/v1/people/me?personFields=", "attributes_url_add_attributes": "true", "authorize_scopes": "email profile openid", "authorize_url": "https://accounts.google.com/o/oauth2/v2/auth", "client_id": "1example23456789.apps.googleusercontent.com", "client_secret": "provider-app-client-secret", "oidc_issuer": "https://accounts.google.com", "token_request_method": "POST", "token_url": "https://www.googleapis.com/oauth2/v4/token" } SignInWithApple Create or update request: "ProviderDetails": { "authorize_scopes": "email name", "client_id": "com.example.cognito", "private_key": "1EXAMPLE", "key_id": "2EXAMPLE", "team_id": "3EXAMPLE" } 
Describe response: "ProviderDetails": { "attributes_url_add_attributes": "false", "authorize_scopes": "email name", "authorize_url": "https://appleid.apple.com/auth/authorize", "client_id": "com.example.cognito", "key_id": "1EXAMPLE", "oidc_issuer": "https://appleid.apple.com", "team_id": "2EXAMPLE", "token_request_method": "POST", "token_url": "https://appleid.apple.com/auth/token" } Facebook Create or update request: "ProviderDetails": { "api_version": "v17.0", "authorize_scopes": "public_profile, email", "client_id": "1example23456789", "client_secret": "provider-app-client-secret" } Describe response: "ProviderDetails": { "api_version": "v17.0", "attributes_url": "https://graph.facebook.com/v17.0/me?fields=", "attributes_url_add_attributes": "true", "authorize_scopes": "public_profile, email", "authorize_url": "https://www.facebook.com/v17.0/dialog/oauth", "client_id": "1example23456789", "client_secret": "provider-app-client-secret", "token_request_method": "GET", "token_url": "https://graph.facebook.com/v17.0/oauth/access_token" } public let providerDetails: [String: String] - /// The IdP name. + /// The name that you want to assign to the IdP. You can pass the identity provider name in the identity_provider query parameter of requests to the Authorize endpoint to silently redirect to sign-in with the associated IdP. public let providerName: String - /// The IdP type. + /// The type of IdP that you want to add. Amazon Cognito supports OIDC, SAML 2.0, Login With Amazon, Sign In With Apple, Google, and Facebook IdPs. public let providerType: IdentityProviderTypeType - /// The user pool ID. + /// The Id of the user pool where you want to create an IdP. public let userPoolId: String @inlinable @@ -2540,7 +2557,7 @@ extension CognitoIdentityProvider { } public struct CreateIdentityProviderResponse: AWSDecodableShape { - /// The newly created IdP object. + /// The details of the new user pool IdP. 
public let identityProvider: IdentityProviderType @inlinable @@ -2560,7 +2577,7 @@ extension CognitoIdentityProvider { public let clientId: String /// A JSON file, encoded as a Document type, with the the settings that you want to apply to your style. public let settings: String? - /// When true, applies the default branding style options. This option reverts to default style options that are managed by Amazon Cognito. You can modify them later in the branding designer. When you specify true for this option, you must also omit values for Settings and Assets in the request. + /// When true, applies the default branding style options. These default options are managed by Amazon Cognito. You can modify them later in the branding designer. When you specify true for this option, you must also omit values for Settings and Assets in the request. public let useCognitoProvidedValues: Bool? /// The ID of the user pool where you want to create a new branding style. public let userPoolId: String @@ -2615,9 +2632,9 @@ extension CognitoIdentityProvider { public let identifier: String /// A friendly name for the resource server. public let name: String - /// A list of scopes. Each scope is a key-value map with the keys name and description. + /// A list of custom scopes. Each scope is a key-value map with the keys ScopeName and ScopeDescription. The name of a custom scope is a combination of ScopeName and the resource server Name in this request, for example MyResourceServerName/MyScopeName. public let scopes: [ResourceServerScopeType]? - /// The user pool ID for the user pool. + /// The ID of the user pool where you want to create a resource server. public let userPoolId: String @inlinable @@ -2653,7 +2670,7 @@ extension CognitoIdentityProvider { } public struct CreateResourceServerResponse: AWSDecodableShape { - /// The newly created resource server. + /// The details of the new resource server. 
public let resourceServer: ResourceServerType @inlinable @@ -2667,11 +2684,11 @@ extension CognitoIdentityProvider { } public struct CreateUserImportJobRequest: AWSEncodableShape { - /// The role ARN for the Amazon CloudWatch Logs Logging role for the user import job. + /// You must specify an IAM role that has permission to log import-job results to Amazon CloudWatch Logs. This parameter is the ARN of that role. public let cloudWatchLogsRoleArn: String - /// The job name for the user import job. + /// A friendly name for the user import job. public let jobName: String - /// The user pool ID for the user pool that the users are being imported into. + /// The ID of the user pool that you want to import users into. public let userPoolId: String @inlinable @@ -2701,7 +2718,7 @@ extension CognitoIdentityProvider { } public struct CreateUserImportJobResponse: AWSDecodableShape { - /// The job object that represents the user import job. + /// The details of the user import job. public let userImportJob: UserImportJobType? @inlinable @@ -2733,18 +2750,18 @@ extension CognitoIdentityProvider { /// AllowedOAuthFlowsUserPoolClient in a request with the CLI or SDKs, it defaults /// to false. public let allowedOAuthFlowsUserPoolClient: Bool? - /// The allowed OAuth scopes. Possible values provided by OAuth are phone, email, openid, and profile. Possible values provided by Amazon Web Services are aws.cognito.signin.user.admin. Custom scopes created in Resource Servers are also supported. + /// The OAuth 2.0 scopes that you want to permit your app client to authorize. Scopes govern access control to user pool self-service API operations, user data from the userInfo endpoint, and third-party APIs. Possible values provided by OAuth are phone, email, openid, and profile. Possible values provided by Amazon Web Services are aws.cognito.signin.user.admin. Custom scopes created in Resource Servers are also supported. public let allowedOAuthScopes: [String]? 
- /// The user pool analytics configuration for collecting metrics and sending them to your Amazon Pinpoint campaign. In Amazon Web Services Regions where Amazon Pinpoint isn't available, user pools only support sending events to Amazon Pinpoint projects in Amazon Web Services Region us-east-1. In Regions where Amazon Pinpoint is available, user pools support sending events to Amazon Pinpoint projects within that same Region. + /// The user pool analytics configuration for collecting metrics and sending them to your Amazon Pinpoint campaign. In Amazon Web Services Regions where Amazon Pinpoint isn't available, user pools might not have access to analytics or might be configurable with campaigns in the US East (N. Virginia) Region. For more information, see Using Amazon Pinpoint analytics. public let analyticsConfiguration: AnalyticsConfigurationType? /// Amazon Cognito creates a session token for each API request in an authentication flow. AuthSessionValidity is the duration, /// in minutes, of that session token. Your user pool native user must respond to each authentication challenge before the session expires. public let authSessionValidity: Int? - /// A list of allowed redirect (callback) URLs for the IdPs. A redirect URI must: Be an absolute URI. Be registered with the authorization server. Not include a fragment component. See OAuth 2.0 - Redirection Endpoint. Amazon Cognito requires HTTPS over HTTP except for http://localhost for testing purposes only. App callback URLs such as myapp://example are also supported. + /// A list of allowed redirect (callback) URLs for the IdPs. A redirect URI must: Be an absolute URI. Be registered with the authorization server. Amazon Cognito doesn't accept authorization requests with redirect_uri values that aren't in the list of CallbackURLs that you provide in this parameter. Not include a fragment component. See OAuth 2.0 - Redirection Endpoint. 
Amazon Cognito requires HTTPS over HTTP except for http://localhost for testing purposes only. App callback URLs such as myapp://example are also supported. public let callbackURLs: [String]? - /// The client name for the user pool client you would like to create. + /// A friendly name for the app client that you want to create. public let clientName: String - /// The default redirect URI. In app clients with one assigned IdP, replaces redirect_uri in authentication requests. Must be in the CallbackURLs list. A redirect URI must: Be an absolute URI. Be registered with the authorization server. Not include a fragment component. For more information, see Default redirect URI. Amazon Cognito requires HTTPS over HTTP except for http://localhost for testing purposes only. App callback URLs such as myapp://example are also supported. + /// The default redirect URI. In app clients with one assigned IdP, replaces redirect_uri in authentication requests. Must be in the CallbackURLs list. public let defaultRedirectURI: String? /// Activates the propagation of additional user context data. For more information about propagation of user context data, see Adding advanced security to a user pool. If you don’t include this parameter, you can't send device fingerprint information, including source IP address, to Amazon Cognito advanced security. You can only activate EnablePropagateAdditionalUserContextData in an app client that has a client secret. public let enablePropagateAdditionalUserContextData: Bool? @@ -2756,7 +2773,7 @@ extension CognitoIdentityProvider { /// You can't assign these legacy ExplicitAuthFlows values to user pool clients at the same time as values that begin with ALLOW_, /// like ALLOW_USER_SRP_AUTH. public let explicitAuthFlows: [ExplicitAuthFlowsType]? - /// Boolean to specify whether you want to generate a secret for the user pool client being created. + /// When true, generates a client secret for the app client. 
Client secrets are used with server-side and machine-to-machine applications. For more information, see App client types. public let generateSecret: Bool? /// The ID token time limit. After this limit expires, your user can't use /// their ID token. To specify the time unit for IdTokenValidity as @@ -2767,7 +2784,7 @@ extension CognitoIdentityProvider { /// Valid range is displayed below in seconds. If you don't specify otherwise in the configuration of your app client, your ID /// tokens are valid for one hour. public let idTokenValidity: Int? - /// A list of allowed logout URLs for the IdPs. + /// A list of allowed logout URLs for managed login authentication. For more information, see Logout endpoint. public let logoutURLs: [String]? /// Errors and responses that you want Amazon Cognito APIs to return during authentication, account confirmation, and password recovery when the user doesn't exist in the user pool. When set to ENABLED and the user doesn't exist, authentication returns an error indicating either the username or password was incorrect. Account confirmation and password recovery return a response indicating a code was sent to a simulated destination. When set to LEGACY, those APIs return a UserNotFoundException exception if the user doesn't exist in the user pool. Valid values include: ENABLED - This prevents user existence-related errors. LEGACY - This represents the early behavior of Amazon Cognito where user existence related errors aren't prevented. Defaults to LEGACY when you don't provide a value. public let preventUserExistenceErrors: PreventUserExistenceErrorTypes? @@ -2784,11 +2801,11 @@ extension CognitoIdentityProvider { /// in seconds. If you don't specify otherwise in the configuration of your app client, your refresh /// tokens are valid for 30 days. public let refreshTokenValidity: Int? - /// A list of provider names for the identity providers (IdPs) that are supported on this client. 
The following are supported: COGNITO, Facebook, Google, SignInWithApple, and LoginWithAmazon. You can also specify the names that you configured for the SAML and OIDC IdPs in your user pool, for example MySAMLIdP or MyOIDCIdP. This setting applies to providers that you can access with the hosted UI and OAuth 2.0 authorization server. The removal of COGNITO from this list doesn't prevent authentication operations for local users with the user pools API in an Amazon Web Services SDK. The only way to prevent API-based authentication is to block access with a WAF rule. + /// A list of provider names for the identity providers (IdPs) that are supported on this client. The following are supported: COGNITO, Facebook, Google, SignInWithApple, and LoginWithAmazon. You can also specify the names that you configured for the SAML and OIDC IdPs in your user pool, for example MySAMLIdP or MyOIDCIdP. This setting applies to providers that you can access with managed login. The removal of COGNITO from this list doesn't prevent authentication operations for local users with the user pools API in an Amazon Web Services SDK. The only way to prevent API-based authentication is to block access with a WAF rule. public let supportedIdentityProviders: [String]? - /// The units in which the validity times are represented. The default unit for RefreshToken is days, and default for ID and access tokens are hours. + /// The units that validity times are represented in. The default unit for refresh tokens is days, and the default for ID and access tokens is hours. public let tokenValidityUnits: TokenValidityUnitsType? - /// The user pool ID for the user pool where you want to create a user pool client. + /// The ID of the user pool where you want to create an app client. public let userPoolId: String /// The list of user attributes that you want your app client to have write access to.
After your user authenticates in your app, their access token authorizes them to set or modify their own attribute value for any attribute in this list. An example of this kind of activity is when you present your user with a form to update their profile information and they change their last name. Your app then makes an UpdateUserAttributes API request and sets family_name to the new value. When you don't specify the WriteAttributes for your app client, your app can write the values of the Standard attributes of your user pool. When your user pool has write access to these default attributes, WriteAttributes doesn't return any information. Amazon Cognito only populates WriteAttributes in the API response if you have specified your own custom set of write attributes. If your app client allows users to sign in through an IdP, this array must include all attributes that you have mapped to IdP attributes. Amazon Cognito updates mapped attributes when users sign in to your application through an IdP. If your app client does not have write access to a mapped attribute, Amazon Cognito throws an error when it tries to update the attribute. For more information, see Specifying IdP Attribute Mappings for Your user pool. public let writeAttributes: [String]? @@ -2899,7 +2916,7 @@ extension CognitoIdentityProvider { } public struct CreateUserPoolClientResponse: AWSDecodableShape { - /// The user pool client that was just created. + /// The details of the new app client. public let userPoolClient: UserPoolClientType? @inlinable @@ -2913,11 +2930,11 @@ extension CognitoIdentityProvider { } public struct CreateUserPoolDomainRequest: AWSEncodableShape { - /// The configuration for a custom domain that hosts the sign-up and sign-in webpages for your application. Provide this parameter only if you want to use a custom domain for your user pool. Otherwise, you can exclude this parameter and use the Amazon Cognito hosted domain instead. 
For more information about the hosted domain and custom domains, see Configuring a User Pool Domain. + /// The configuration for a custom domain. Configures your domain with a Certificate Manager certificate in the us-east-1 Region. Provide this parameter only if you want to use a custom domain for your user pool. Otherwise, you can exclude this parameter and use a prefix domain instead. For more information about the hosted domain and custom domains, see Configuring a User Pool Domain. public let customDomainConfig: CustomDomainConfigType? - /// The domain string. For custom domains, this is the fully-qualified domain name, such as auth.example.com. For Amazon Cognito prefix domains, this is the prefix alone, such as auth. + /// The domain string. For custom domains, this is the fully-qualified domain name, such as auth.example.com. For prefix domains, this is the prefix alone, such as myprefix. A prefix value of myprefix for a user pool in the us-east-1 Region results in a domain of myprefix.auth.us-east-1.amazoncognito.com. public let domain: String - /// The version of managed login branding that you want to apply to your domain. A value of 1 indicates hosted UI (classic) branding and a version of 2 indicates managed login branding. Managed login requires that your user pool be configured for any feature plan other than Lite. + /// The version of managed login branding that you want to apply to your domain. A value of 1 indicates hosted UI (classic) and a version of 2 indicates managed login. Managed login requires that your user pool be configured for any feature plan other than Lite. public let managedLoginVersion: Int? /// The ID of the user pool where you want to add a domain.
public let userPoolId: String @@ -2951,7 +2968,7 @@ extension CognitoIdentityProvider { public struct CreateUserPoolDomainResponse: AWSDecodableShape { /// The Amazon CloudFront endpoint that you use as the target of the alias that you set up with your Domain Name Service (DNS) provider. Amazon Cognito returns this value if you set a custom domain with CustomDomainConfig. If you set an Amazon Cognito prefix domain, this operation returns a blank response. public let cloudFrontDomain: String? - /// The version of managed login branding applied your domain. A value of 1 indicates hosted UI (classic) branding and a version of 2 indicates managed login branding. + /// The version of managed login branding applied to your domain. A value of 1 indicates hosted UI (classic) and a version of 2 indicates managed login. public let managedLoginVersion: Int? @inlinable @@ -2969,11 +2986,11 @@ extension CognitoIdentityProvider { public struct CreateUserPoolRequest: AWSEncodableShape { /// The available verified method a user can use to recover their password when they call ForgotPassword. You can use this setting to define a preferred method when a user has more than one method available. With this setting, SMS doesn't qualify for a valid password recovery mechanism if the user also has SMS multi-factor authentication (MFA) activated. In the absence of this setting, Amazon Cognito uses the legacy behavior to determine the recovery method where SMS is preferred through email. public let accountRecoverySetting: AccountRecoverySettingType? - /// The configuration for AdminCreateUser requests. + /// The configuration for AdminCreateUser requests. Includes the template for the invitation message for new users, the duration of temporary passwords, and permitting self-service sign-up. public let adminCreateUserConfig: AdminCreateUserConfigType? - /// Attributes supported as an alias for this user pool. Possible values: phone_number, email, or preferred_username. 
+ /// Attributes supported as an alias for this user pool. Possible values: phone_number, email, or preferred_username. For more information about alias attributes, see Customizing sign-in attributes. public let aliasAttributes: [AliasAttributeType]? - /// The attributes to be auto-verified. Possible values: email, phone_number. + /// The attributes that you want your user pool to automatically verify. Possible values: email, phone_number. For more information, see Verifying contact information at sign-up. public let autoVerifiedAttributes: [VerifiedAttributeType]? /// When active, DeletionProtection prevents accidental deletion of your user /// pool. Before you can delete a user pool that you have protected against deletion, you @@ -2982,7 +2999,7 @@ extension CognitoIdentityProvider { /// send a new DeleteUserPool request after you deactivate deletion protection in an /// UpdateUserPool API request. public let deletionProtection: DeletionProtectionType? - /// The device-remembering configuration for a user pool. A null value indicates that you have deactivated device remembering in your user pool. When you provide a value for any DeviceConfiguration field, you activate the Amazon Cognito device-remembering feature. + /// The device-remembering configuration for a user pool. Device remembering or device tracking is a "Remember me on this device" option for user pools that perform authentication with the device key of a trusted device in the back end, instead of a user-provided MFA code. For more information about device authentication, see Working with user devices in your user pool. A null value indicates that you have deactivated device remembering in your user pool. When you provide a value for any DeviceConfiguration field, you activate the Amazon Cognito device-remembering feature. public let deviceConfiguration: DeviceConfigurationType? /// The email configuration of your user pool. 
The email configuration type sets your preferred sending method, Amazon Web Services Region, and sender for messages from your user pool. public let emailConfiguration: EmailConfigurationType? @@ -2992,17 +3009,17 @@ extension CognitoIdentityProvider { public let emailVerificationSubject: String? /// A collection of user pool Lambda triggers. Amazon Cognito invokes triggers at several possible stages of authentication operations. Triggers can modify the outcome of the operations that invoked them. public let lambdaConfig: LambdaConfigType? - /// Specifies MFA configuration details. + /// Sets multi-factor authentication (MFA) to be on, off, or optional. When ON, all users must set up MFA before they can sign in. When OPTIONAL, your application must make a client-side determination of whether a user wants to register an MFA device. For user pools with adaptive authentication with threat protection, choose OPTIONAL. public let mfaConfiguration: UserPoolMfaType? - /// The policies associated with the new user pool. + /// The password policy and sign-in policy in the user pool. The password policy sets options like password complexity requirements and password history. The sign-in policy sets the options available to applications in choice-based authentication. public let policies: UserPoolPolicyType? - /// A string used to name the user pool. + /// A friendly name for your user pool. public let poolName: String - /// An array of schema attributes for the new user pool. These attributes can be standard or custom attributes. + /// An array of attributes for the new user pool. You can add custom attributes and modify the properties of default attributes. The specifications in this parameter set the required attributes in your user pool. For more information, see Working with user attributes. public let schema: [SchemaAttributeType]? /// A string representing the SMS authentication message. public let smsAuthenticationMessage: String? 
- /// The SMS configuration with the settings that your Amazon Cognito user pool must use to send an SMS message from your Amazon Web Services account through Amazon Simple Notification Service. To send SMS messages with Amazon SNS in the Amazon Web Services Region that you want, the Amazon Cognito user pool uses an Identity and Access Management (IAM) role in your Amazon Web Services account. + /// The SMS configuration with the settings that your Amazon Cognito user pool must use to send an SMS message from your Amazon Web Services account through Amazon Simple Notification Service. To send SMS messages with Amazon SNS in the Amazon Web Services Region that you want, the Amazon Cognito user pool uses an Identity and Access Management (IAM) role in your Amazon Web Services account. For more information see SMS message settings. public let smsConfiguration: SmsConfigurationType? /// This parameter is no longer used. See VerificationMessageTemplateType. public let smsVerificationMessage: String? @@ -3011,9 +3028,9 @@ extension CognitoIdentityProvider { /// more information, see /// Verifying updates to email addresses and phone numbers. public let userAttributeUpdateSettings: UserAttributeUpdateSettingsType? - /// Specifies whether a user can use an email address or phone number as a username when they sign up. + /// Specifies whether a user can use an email address or phone number as a username when they sign up. For more information, see Customizing sign-in attributes. public let usernameAttributes: [UsernameAttributeType]? - /// Case sensitivity on the username input for the selected sign-in option. When case sensitivity is set to False (case insensitive), users can sign in with any combination of capital and lowercase letters. For example, username, USERNAME, or UserName, or for email, email@example.com or EMaiL@eXamplE.Com. For most use cases, set case sensitivity to False (case insensitive) as a best practice. 
When usernames and email addresses are case insensitive, Amazon Cognito treats any variation in case as the same user, and prevents a case variation from being assigned to the same attribute for a different user. This configuration is immutable after you set it. For more information, see UsernameConfigurationType. + /// Sets the case sensitivity option for sign-in usernames. When CaseSensitive is false (case insensitive), users can sign in with any combination of capital and lowercase letters. For example, username, USERNAME, or UserName, or for email, email@example.com or EMaiL@eXamplE.Com. For most use cases, set case sensitivity to false as a best practice. When usernames and email addresses are case insensitive, Amazon Cognito treats any variation in case as the same user, and prevents a case variation from being assigned to the same attribute for a different user. When CaseSensitive is true (case sensitive), Amazon Cognito interprets USERNAME and UserName as distinct users. This configuration is immutable after you set it. public let usernameConfiguration: UsernameConfigurationType? /// User pool add-ons. Contains settings for activation of advanced security features. To log user security information but take no action, set to AUDIT. To configure automatic security responses to risky traffic to your user pool, set to ENFORCED. For more information, see Adding advanced security to a user pool. public let userPoolAddOns: UserPoolAddOnsType? @@ -3116,7 +3133,7 @@ extension CognitoIdentityProvider { } public struct CreateUserPoolResponse: AWSDecodableShape { - /// A container for the user pool details. + /// The details of the created user pool. public let userPool: UserPoolType? @inlinable @@ -3198,9 +3215,9 @@ extension CognitoIdentityProvider { } public struct DeleteGroupRequest: AWSEncodableShape { - /// The name of the group. + /// The name of the group that you want to delete. public let groupName: String - /// The user pool ID for the user pool. 
+ /// The ID of the user pool where you want to delete the group. public let userPoolId: String @inlinable @@ -3225,9 +3242,9 @@ extension CognitoIdentityProvider { } public struct DeleteIdentityProviderRequest: AWSEncodableShape { - /// The IdP name. + /// The name of the IdP that you want to delete. public let providerName: String - /// The user pool ID. + /// The ID of the user pool where you want to delete the identity provider. public let userPoolId: String @inlinable @@ -3277,9 +3294,9 @@ extension CognitoIdentityProvider { } public struct DeleteResourceServerRequest: AWSEncodableShape { - /// The identifier for the resource server. + /// The identifier of the resource server that you want to delete. public let identifier: String - /// The user pool ID for the user pool that hosts the resource server. + /// The ID of the user pool where you want to delete the resource server. public let userPoolId: String @inlinable @@ -3306,7 +3323,7 @@ extension CognitoIdentityProvider { public struct DeleteUserAttributesRequest: AWSEncodableShape { /// A valid access token that Amazon Cognito issued to the user whose attributes you want to delete. public let accessToken: String - /// An array of strings representing the user attribute names you want to delete. For custom attributes, you must prependattach the custom: prefix to the front of the attribute name. + /// An array of strings representing the user attribute names you want to delete. For custom attributes, you must prepend the custom: prefix to the attribute name, for example custom:department. public let userAttributeNames: [String] @inlinable @@ -3335,9 +3352,9 @@ extension CognitoIdentityProvider { } public struct DeleteUserPoolClientRequest: AWSEncodableShape { - /// The app client ID of the app associated with the user pool. + /// The ID of the user pool app client that you want to delete. public let clientId: String - /// The user pool ID for the user pool where you want to delete the client. 
+ /// The ID of the user pool where you want to delete the client. public let userPoolId: String @inlinable @@ -3362,9 +3379,9 @@ extension CognitoIdentityProvider { } public struct DeleteUserPoolDomainRequest: AWSEncodableShape { - /// The domain string. For custom domains, this is the fully-qualified domain name, such as auth.example.com. For Amazon Cognito prefix domains, this is the prefix alone, such as auth. + /// The domain that you want to delete. For custom domains, this is the fully-qualified domain name, such as auth.example.com. For Amazon Cognito prefix domains, this is the prefix alone, such as auth. public let domain: String - /// The user pool ID. + /// The ID of the user pool where you want to delete the domain. public let userPoolId: String @inlinable @@ -3393,7 +3410,7 @@ extension CognitoIdentityProvider { } public struct DeleteUserPoolRequest: AWSEncodableShape { - /// The user pool ID for the user pool you want to delete. + /// The ID of the user pool that you want to delete. public let userPoolId: String @inlinable @@ -3431,9 +3448,9 @@ extension CognitoIdentityProvider { } public struct DeleteWebAuthnCredentialRequest: AWSEncodableShape { - /// A valid access token that Amazon Cognito issued to the user whose passkey you want to delete. + /// A valid access token that Amazon Cognito issued to the user whose passkey credential you want to delete. public let accessToken: String - /// The unique identifier of the passkey that you want to delete. Look up registered devices with ListWebAuthnCredentials. + /// The unique identifier of the passkey that you want to delete. Look up registered devices with ListWebAuthnCredentials. public let credentialId: String @inlinable @@ -3458,9 +3475,9 @@ extension CognitoIdentityProvider { } public struct DescribeIdentityProviderRequest: AWSEncodableShape { - /// The IdP name. + /// The name of the IdP that you want to describe. public let providerName: String - /// The user pool ID. 
+ /// The ID of the user pool that has the IdP that you want to describe. public let userPoolId: String @inlinable @@ -3485,7 +3502,7 @@ extension CognitoIdentityProvider { } public struct DescribeIdentityProviderResponse: AWSDecodableShape { - /// The identity provider details. + /// The details of the requested IdP. public let identityProvider: IdentityProviderType @inlinable @@ -3589,7 +3606,7 @@ extension CognitoIdentityProvider { public struct DescribeResourceServerRequest: AWSEncodableShape { /// A unique resource server identifier for the resource server. The identifier can be an API friendly name like solar-system-data. You can also set an API URL like https://solar-system-data-api.example.com as your identifier. Amazon Cognito represents scopes in the access token in the format $resource-server-identifier/$scope. Longer scope-identifier strings increase the size of your access tokens. public let identifier: String - /// The user pool ID for the user pool that hosts the resource server. + /// The ID of the user pool that hosts the resource server. public let userPoolId: String @inlinable @@ -3614,7 +3631,7 @@ extension CognitoIdentityProvider { } public struct DescribeResourceServerResponse: AWSDecodableShape { - /// The resource server. + /// The details of the requested resource server. public let resourceServer: ResourceServerType @inlinable @@ -3628,9 +3645,9 @@ extension CognitoIdentityProvider { } public struct DescribeRiskConfigurationRequest: AWSEncodableShape { - /// The app client ID. + /// The ID of the app client with the risk configuration that you want to inspect. You can apply default risk configuration at the user pool level and further customize it from user pool defaults at the app-client level. Specify ClientId to inspect client-level configuration, or UserPoolId to inspect pool-level configuration. public let clientId: String? - /// The user pool ID. + /// The ID of the user pool with the risk configuration that you want to inspect. 
You can apply default risk configuration at the user pool level and further customize it from user pool defaults at the app-client level. Specify ClientId to inspect client-level configuration, or UserPoolId to inspect pool-level configuration. public let userPoolId: String @inlinable @@ -3655,7 +3672,7 @@ extension CognitoIdentityProvider { } public struct DescribeRiskConfigurationResponse: AWSDecodableShape { - /// The risk configuration. + /// The details of the requested risk configuration. public let riskConfiguration: RiskConfigurationType @inlinable @@ -3669,9 +3686,9 @@ extension CognitoIdentityProvider { } public struct DescribeUserImportJobRequest: AWSEncodableShape { - /// The job ID for the user import job. + /// The ID of the user import job that you want to describe. public let jobId: String - /// The user pool ID for the user pool that the users are being imported into. + /// The ID of the user pool that's associated with the import job. public let userPoolId: String @inlinable @@ -3696,7 +3713,7 @@ extension CognitoIdentityProvider { } public struct DescribeUserImportJobResponse: AWSDecodableShape { - /// The job object that represents the user import job. + /// The details of the user import job. public let userImportJob: UserImportJobType? @inlinable @@ -3710,9 +3727,9 @@ extension CognitoIdentityProvider { } public struct DescribeUserPoolClientRequest: AWSEncodableShape { - /// The app client ID of the app associated with the user pool. + /// The ID of the app client that you want to describe. public let clientId: String - /// The user pool ID for the user pool you want to describe. + /// The ID of the user pool that contains the app client you want to describe. public let userPoolId: String @inlinable @@ -3737,7 +3754,7 @@ extension CognitoIdentityProvider { } public struct DescribeUserPoolClientResponse: AWSDecodableShape { - /// The user pool client from a server response to describe the user pool client. 
+ /// The details of the requested app client. public let userPoolClient: UserPoolClientType? @inlinable @@ -3751,7 +3768,7 @@ extension CognitoIdentityProvider { } public struct DescribeUserPoolDomainRequest: AWSEncodableShape { - /// The domain string. For custom domains, this is the fully-qualified domain name, such as auth.example.com. For Amazon Cognito prefix domains, this is the prefix alone, such as auth. + /// The domain that you want to describe. For custom domains, this is the fully-qualified domain name, such as auth.example.com. For Amazon Cognito prefix domains, this is the prefix alone, such as auth. public let domain: String @inlinable @@ -3771,7 +3788,7 @@ extension CognitoIdentityProvider { } public struct DescribeUserPoolDomainResponse: AWSDecodableShape { - /// A domain description object containing information about the domain. + /// The details of the requested user pool domain. public let domainDescription: DomainDescriptionType? @inlinable @@ -3785,7 +3802,7 @@ extension CognitoIdentityProvider { } public struct DescribeUserPoolRequest: AWSEncodableShape { - /// The user pool ID for the user pool you want to describe. + /// The ID of the user pool you want to describe. public let userPoolId: String @inlinable @@ -3805,7 +3822,7 @@ extension CognitoIdentityProvider { } public struct DescribeUserPoolResponse: AWSDecodableShape { - /// The container of metadata returned by the server to describe the pool. + /// The details of the requested user pool. public let userPool: UserPoolType? @inlinable @@ -3979,9 +3996,9 @@ extension CognitoIdentityProvider { } public struct EmailMfaConfigType: AWSEncodableShape & AWSDecodableShape { - /// The template for the email message that your user pool sends to users with an MFA code. The message must contain the {####} placeholder. In the message, Amazon Cognito replaces this placeholder with the code. If you don't provide this parameter, Amazon Cognito sends messages in the default format. 
+ /// The template for the email message that your user pool sends to users with a code for MFA and sign-in with an email OTP. The message must contain the {####} placeholder. In the message, Amazon Cognito replaces this placeholder with the code. If you don't provide this parameter, Amazon Cognito sends messages in the default format. public let message: String? - /// The subject of the email message that your user pool sends to users with an MFA code. + /// The subject of the email message that your user pool sends to users with a code for MFA and email OTP sign-in. public let subject: String? @inlinable @@ -4150,13 +4167,14 @@ extension CognitoIdentityProvider { /// The ID of the client associated with the user pool. public let clientId: String /// A map of custom key-value pairs that you can provide as input for any custom workflows that this action triggers. You create custom workflows by assigning Lambda functions to user pool triggers. When you use the ForgotPassword API action, Amazon Cognito invokes any functions that are assigned to the following triggers: pre sign-up, custom message, and user migration. When Amazon Cognito invokes any of these functions, it passes a JSON payload, which the function receives as input. This payload contains a clientMetadata attribute, which provides the data that you assigned to the ClientMetadata parameter in your ForgotPassword request. In your function code in Lambda, you can process the clientMetadata value to enhance your workflow for your specific needs. For more information, see - /// Customizing user pool Workflows with Lambda Triggers in the Amazon Cognito Developer Guide. When you use the ClientMetadata parameter, remember that Amazon Cognito won't do the following: Store the ClientMetadata value. This data is available only to Lambda triggers that are assigned to a user pool to support custom workflows. If your user pool configuration doesn't include triggers, the ClientMetadata parameter serves no purpose. 
Validate the ClientMetadata value. Encrypt the ClientMetadata value. Don't use Amazon Cognito to provide sensitive information. + /// Customizing user pool Workflows with Lambda Triggers in the Amazon Cognito Developer Guide. When you use the ClientMetadata parameter, note that Amazon Cognito won't do the following: Store the ClientMetadata value. This data is available only to Lambda triggers that are assigned to a user pool to support custom workflows. If your user pool configuration doesn't include triggers, the ClientMetadata parameter serves no purpose. Validate the ClientMetadata value. Encrypt the ClientMetadata value. Don't send sensitive information in this parameter. public let clientMetadata: [String: String]? - /// A keyed-hash message authentication code (HMAC) calculated using the secret key of a user pool client and username plus the client ID in the message. + /// A keyed-hash message authentication code (HMAC) calculated using the secret key of a user pool client and username plus the client ID in the message. For more information about SecretHash, see Computing secret hash values. public let secretHash: String? /// Contextual data about your user session, such as the device fingerprint, IP address, or location. Amazon Cognito advanced /// security evaluates the risk of an authentication event based on the context that your app generates and passes to Amazon Cognito - /// when it makes API requests. + /// when it makes API requests. For more information, see Collecting data for threat protection in + /// applications. public let userContextData: UserContextDataType? /// The username of the user that you want to query or modify. The value of this parameter is typically your user's username, but it can be any of their alias attributes. If username isn't an alias attribute in your user pool, this value must be the sub of a local user or the username of a user from a third-party IdP. 
public let username: String @@ -4214,7 +4232,7 @@ extension CognitoIdentityProvider { } public struct GetCSVHeaderRequest: AWSEncodableShape { - /// The user pool ID for the user pool that the users are to be imported into. + /// The ID of the user pool that the users are to be imported into. public let userPoolId: String @inlinable @@ -4236,7 +4254,7 @@ extension CognitoIdentityProvider { public struct GetCSVHeaderResponse: AWSDecodableShape { /// The header information of the CSV file for the user import job. public let csvHeader: [String]? - /// The user pool ID for the user pool that the users are to be imported into. + /// The ID of the user pool that the users are to be imported into. public let userPoolId: String? @inlinable @@ -4293,7 +4311,7 @@ extension CognitoIdentityProvider { public struct GetGroupRequest: AWSEncodableShape { /// The name of the group. public let groupName: String - /// The user pool ID for the user pool. + /// The ID of the user pool. public let userPoolId: String @inlinable @@ -4443,7 +4461,7 @@ extension CognitoIdentityProvider { public struct GetUICustomizationRequest: AWSEncodableShape { /// The client ID for the client app. public let clientId: String? - /// The user pool ID for the user pool. + /// The ID of the user pool. public let userPoolId: String @inlinable @@ -4487,7 +4505,7 @@ extension CognitoIdentityProvider { /// The attribute name returned by the server response to get the user attribute verification code. public let attributeName: String /// A map of custom key-value pairs that you can provide as input for any custom workflows that this action triggers. You create custom workflows by assigning Lambda functions to user pool triggers. When you use the GetUserAttributeVerificationCode API action, Amazon Cognito invokes the function that is assigned to the custom message trigger. When Amazon Cognito invokes this function, it passes a JSON payload, which the function receives as input. 
This payload contains a clientMetadata attribute, which provides the data that you assigned to the ClientMetadata parameter in your GetUserAttributeVerificationCode request. In your function code in Lambda, you can process the clientMetadata value to enhance your workflow for your specific needs. For more information, see - /// Customizing user pool Workflows with Lambda Triggers in the Amazon Cognito Developer Guide. When you use the ClientMetadata parameter, remember that Amazon Cognito won't do the following: Store the ClientMetadata value. This data is available only to Lambda triggers that are assigned to a user pool to support custom workflows. If your user pool configuration doesn't include triggers, the ClientMetadata parameter serves no purpose. Validate the ClientMetadata value. Encrypt the ClientMetadata value. Don't use Amazon Cognito to provide sensitive information. + /// Customizing user pool Workflows with Lambda Triggers in the Amazon Cognito Developer Guide. When you use the ClientMetadata parameter, note that Amazon Cognito won't do the following: Store the ClientMetadata value. This data is available only to Lambda triggers that are assigned to a user pool to support custom workflows. If your user pool configuration doesn't include triggers, the ClientMetadata parameter serves no purpose. Validate the ClientMetadata value. Encrypt the ClientMetadata value. Don't send sensitive information in this parameter. public let clientMetadata: [String: String]? @inlinable @@ -4806,20 +4824,21 @@ extension CognitoIdentityProvider { public struct InitiateAuthRequest: AWSEncodableShape { /// The Amazon Pinpoint analytics metadata that contributes to your metrics for InitiateAuth calls. public let analyticsMetadata: AnalyticsMetadataType? - /// The authentication flow that you want to initiate. The AuthParameters that you must submit are linked to the flow that you submit. 
For example: USER_AUTH: Request a preferred authentication type or review available authentication types. From the offered authentication types, select one in a challenge response and then authenticate with that method in an additional challenge response. REFRESH_TOKEN_AUTH: Receive new ID and access tokens when you pass a REFRESH_TOKEN parameter with a valid refresh token as the value. USER_SRP_AUTH: Receive secure remote password (SRP) variables for the next challenge, PASSWORD_VERIFIER, when you pass USERNAME and SRP_A parameters. USER_PASSWORD_AUTH: Receive new tokens or the next challenge, for example SOFTWARE_TOKEN_MFA, when you pass USERNAME and PASSWORD parameters. Valid values include the following: USER_AUTH The entry point for sign-in with passwords, one-time passwords, biometric devices, and security keys. USER_SRP_AUTH Username-password authentication with the Secure Remote Password (SRP) protocol. For more information, see Use SRP password verification in custom authentication flow. REFRESH_TOKEN_AUTH and REFRESH_TOKEN Provide a valid refresh token and receive new ID and access tokens. For more information, see Using the refresh token. CUSTOM_AUTH Custom authentication with Lambda triggers. For more information, see Custom authentication challenge Lambda triggers. USER_PASSWORD_AUTH Username-password authentication with the password sent directly in the request. For more information, see Admin authentication flow. ADMIN_USER_PASSWORD_AUTH is a flow type of AdminInitiateAuth and isn't valid for InitiateAuth. ADMIN_NO_SRP_AUTH is a legacy server-side username-password flow and isn't valid for InitiateAuth. + /// The authentication flow that you want to initiate. Each AuthFlow has linked AuthParameters that you must submit. The following are some example flows and their parameters. USER_AUTH: Request a preferred authentication type or review available authentication types. 
From the offered authentication types, select one in a challenge response and then authenticate with that method in an additional challenge response. REFRESH_TOKEN_AUTH: Receive new ID and access tokens when you pass a REFRESH_TOKEN parameter with a valid refresh token as the value. USER_SRP_AUTH: Receive secure remote password (SRP) variables for the next challenge, PASSWORD_VERIFIER, when you pass USERNAME and SRP_A parameters. USER_PASSWORD_AUTH: Receive new tokens or the next challenge, for example SOFTWARE_TOKEN_MFA, when you pass USERNAME and PASSWORD parameters. All flows USER_AUTH The entry point for sign-in with passwords, one-time passwords, and WebAuthN authenticators. USER_SRP_AUTH Username-password authentication with the Secure Remote Password (SRP) protocol. For more information, see Use SRP password verification in custom authentication flow. REFRESH_TOKEN_AUTH and REFRESH_TOKEN Provide a valid refresh token and receive new ID and access tokens. For more information, see Using the refresh token. CUSTOM_AUTH Custom authentication with Lambda triggers. For more information, see Custom authentication challenge Lambda triggers. USER_PASSWORD_AUTH Username-password authentication with the password sent directly in the request. For more information, see Admin authentication flow. ADMIN_USER_PASSWORD_AUTH is a flow type of AdminInitiateAuth and isn't valid for InitiateAuth. ADMIN_NO_SRP_AUTH is a legacy server-side username-password flow and isn't valid for InitiateAuth. public let authFlow: AuthFlowType /// The authentication parameters. These are inputs corresponding to the AuthFlow that you're invoking. The required values depend on the value of AuthFlow: For USER_AUTH: USERNAME (required), PREFERRED_CHALLENGE. If you don't provide a value for PREFERRED_CHALLENGE, Amazon Cognito responds with the AvailableChallenges parameter that specifies the available sign-in methods. 
For USER_SRP_AUTH: USERNAME (required), SRP_A (required), SECRET_HASH (required if the app client is configured with a client secret), DEVICE_KEY. For USER_PASSWORD_AUTH: USERNAME (required), PASSWORD (required), SECRET_HASH (required if the app client is configured with a client secret), DEVICE_KEY. For REFRESH_TOKEN_AUTH/REFRESH_TOKEN: REFRESH_TOKEN (required), SECRET_HASH (required if the app client is configured with a client secret), DEVICE_KEY. For CUSTOM_AUTH: USERNAME (required), SECRET_HASH (if app client is configured with client secret), DEVICE_KEY. To start the authentication flow with password verification, include ChallengeName: SRP_A and SRP_A: (The SRP_A Value). For more information about SECRET_HASH, see Computing secret hash values. For information about DEVICE_KEY, see Working with user devices in your user pool. public let authParameters: [String: String]? /// The app client ID. public let clientId: String /// A map of custom key-value pairs that you can provide as input for certain custom workflows that this action triggers. You create custom workflows by assigning Lambda functions to user pool triggers. When you use the InitiateAuth API action, Amazon Cognito invokes the Lambda functions that are specified for various triggers. The ClientMetadata value is passed as input to the functions for only the following triggers: Pre signup Pre authentication User migration When Amazon Cognito invokes the functions for these triggers, it passes a JSON payload, which the function receives as input. This payload contains a validationData attribute, which provides the data that you assigned to the ClientMetadata parameter in your InitiateAuth request. In your function code in Lambda, you can process the validationData value to enhance your workflow for your specific needs. 
When you use the InitiateAuth API action, Amazon Cognito also invokes the functions for the following triggers, but it doesn't provide the ClientMetadata value as input: Post authentication Custom message Pre token generation Create auth challenge Define auth challenge Custom email sender Custom SMS sender For more information, see - /// Customizing user pool Workflows with Lambda Triggers in the Amazon Cognito Developer Guide. When you use the ClientMetadata parameter, remember that Amazon Cognito won't do the following: Store the ClientMetadata value. This data is available only to Lambda triggers that are assigned to a user pool to support custom workflows. If your user pool configuration doesn't include triggers, the ClientMetadata parameter serves no purpose. Validate the ClientMetadata value. Encrypt the ClientMetadata value. Don't use Amazon Cognito to provide sensitive information. + /// Customizing user pool Workflows with Lambda Triggers in the Amazon Cognito Developer Guide. When you use the ClientMetadata parameter, note that Amazon Cognito won't do the following: Store the ClientMetadata value. This data is available only to Lambda triggers that are assigned to a user pool to support custom workflows. If your user pool configuration doesn't include triggers, the ClientMetadata parameter serves no purpose. Validate the ClientMetadata value. Encrypt the ClientMetadata value. Don't send sensitive information in this parameter. public let clientMetadata: [String: String]? /// The optional session ID from a ConfirmSignUp API request. You can sign in a user directly from the sign-up process with the USER_AUTH authentication flow. public let session: String? /// Contextual data about your user session, such as the device fingerprint, IP address, or location. Amazon Cognito advanced /// security evaluates the risk of an authentication event based on the context that your app generates and passes to Amazon Cognito - /// when it makes API requests. 
+ /// when it makes API requests. For more information, see Collecting data for threat protection in + /// applications. public let userContextData: UserContextDataType? @inlinable @@ -5058,7 +5077,7 @@ extension CognitoIdentityProvider { public let limit: Int? /// An identifier that was returned from the previous call to this operation, which can be used to return the next set of items in the list. public let nextToken: String? - /// The user pool ID for the user pool. + /// The ID of the user pool. public let userPoolId: String @inlinable @@ -5159,7 +5178,7 @@ extension CognitoIdentityProvider { public let maxResults: Int? /// A pagination token. public let nextToken: String? - /// The user pool ID for the user pool. + /// The ID of the user pool. public let userPoolId: String @inlinable @@ -5247,7 +5266,7 @@ extension CognitoIdentityProvider { /// Subsequent requests return a new pagination token. By use of this token, you can paginate /// through the full list of items. public let paginationToken: String? - /// The user pool ID for the user pool that the users are being imported into. + /// The ID of the user pool that the users are being imported into. public let userPoolId: String @inlinable @@ -5299,7 +5318,7 @@ extension CognitoIdentityProvider { public let maxResults: Int? /// An identifier that was returned from the previous call to this operation, which can be used to return the next set of items in the list. public let nextToken: String? - /// The user pool ID for the user pool where you want to list user pool clients. + /// The ID of the user pool where you want to list user pool clients. public let userPoolId: String @inlinable @@ -5395,7 +5414,7 @@ extension CognitoIdentityProvider { public let limit: Int? /// An identifier that was returned from the previous call to this operation, which can be used to return the next set of items in the list. public let nextToken: String? - /// The user pool ID for the user pool. + /// The ID of the user pool. 
public let userPoolId: String @inlinable @@ -5459,7 +5478,7 @@ extension CognitoIdentityProvider { /// Subsequent requests return a new pagination token. By use of this token, you can paginate /// through the full list of items. public let paginationToken: String? - /// The user pool ID for the user pool on which the search should be performed. + /// The ID of the user pool on which the search should be performed. public let userPoolId: String @inlinable @@ -5656,7 +5675,7 @@ extension CognitoIdentityProvider { public let managedLoginBrandingId: String? /// A JSON file, encoded as a Document type, with the the settings that you want to apply to your style. public let settings: String? - /// When true, applies the default branding style options. This option reverts to a "blank" style that you can modify later in the branding designer. + /// When true, applies the default branding style options. This option reverts to default style options that are managed by Amazon Cognito. You can modify them later in the branding designer. When you specify true for this option, you must also omit values for Settings and Assets in the request. public let useCognitoProvidedValues: Bool? /// The user pool where the branding style is assigned. public let userPoolId: String? @@ -5995,13 +6014,14 @@ extension CognitoIdentityProvider { /// The ID of the client associated with the user pool. public let clientId: String /// A map of custom key-value pairs that you can provide as input for any custom workflows that this action triggers. You create custom workflows by assigning Lambda functions to user pool triggers. When you use the ResendConfirmationCode API action, Amazon Cognito invokes the function that is assigned to the custom message trigger. When Amazon Cognito invokes this function, it passes a JSON payload, which the function receives as input. 
This payload contains a clientMetadata attribute, which provides the data that you assigned to the ClientMetadata parameter in your ResendConfirmationCode request. In your function code in Lambda, you can process the clientMetadata value to enhance your workflow for your specific needs. For more information, see - /// Customizing user pool Workflows with Lambda Triggers in the Amazon Cognito Developer Guide. When you use the ClientMetadata parameter, remember that Amazon Cognito won't do the following: Store the ClientMetadata value. This data is available only to Lambda triggers that are assigned to a user pool to support custom workflows. If your user pool configuration doesn't include triggers, the ClientMetadata parameter serves no purpose. Validate the ClientMetadata value. Encrypt the ClientMetadata value. Don't use Amazon Cognito to provide sensitive information. + /// Customizing user pool Workflows with Lambda Triggers in the Amazon Cognito Developer Guide. When you use the ClientMetadata parameter, note that Amazon Cognito won't do the following: Store the ClientMetadata value. This data is available only to Lambda triggers that are assigned to a user pool to support custom workflows. If your user pool configuration doesn't include triggers, the ClientMetadata parameter serves no purpose. Validate the ClientMetadata value. Encrypt the ClientMetadata value. Don't send sensitive information in this parameter. public let clientMetadata: [String: String]? - /// A keyed-hash message authentication code (HMAC) calculated using the secret key of a user pool client and username plus the client ID in the message. + /// A keyed-hash message authentication code (HMAC) calculated using the secret key of a user pool client and username plus the client ID in the message. For more information about SecretHash, see Computing secret hash values. public let secretHash: String? 
/// Contextual data about your user session, such as the device fingerprint, IP address, or location. Amazon Cognito advanced /// security evaluates the risk of an authentication event based on the context that your app generates and passes to Amazon Cognito - /// when it makes API requests. + /// when it makes API requests. For more information, see Collecting data for threat protection in + /// applications. public let userContextData: UserContextDataType? /// The username of the user that you want to query or modify. The value of this parameter is typically your user's username, but it can be any of their alias attributes. If username isn't an alias attribute in your user pool, this value must be the sub of a local user or the username of a user from a third-party IdP. public let username: String @@ -6122,13 +6142,14 @@ extension CognitoIdentityProvider { /// The app client ID. public let clientId: String /// A map of custom key-value pairs that you can provide as input for any custom workflows that this action triggers. You create custom workflows by assigning Lambda functions to user pool triggers. When you use the RespondToAuthChallenge API action, Amazon Cognito invokes any functions that are assigned to the following triggers: post authentication, pre token generation, define auth challenge, create auth challenge, and verify auth challenge. When Amazon Cognito invokes any of these functions, it passes a JSON payload, which the function receives as input. This payload contains a clientMetadata attribute, which provides the data that you assigned to the ClientMetadata parameter in your RespondToAuthChallenge request. In your function code in Lambda, you can process the clientMetadata value to enhance your workflow for your specific needs. For more information, see - /// Customizing user pool Workflows with Lambda Triggers in the Amazon Cognito Developer Guide. 
When you use the ClientMetadata parameter, remember that Amazon Cognito won't do the following: Store the ClientMetadata value. This data is available only to Lambda triggers that are assigned to a user pool to support custom workflows. If your user pool configuration doesn't include triggers, the ClientMetadata parameter serves no purpose. Validate the ClientMetadata value. Encrypt the ClientMetadata value. Don't use Amazon Cognito to provide sensitive information. + /// Customizing user pool Workflows with Lambda Triggers in the Amazon Cognito Developer Guide. When you use the ClientMetadata parameter, note that Amazon Cognito won't do the following: Store the ClientMetadata value. This data is available only to Lambda triggers that are assigned to a user pool to support custom workflows. If your user pool configuration doesn't include triggers, the ClientMetadata parameter serves no purpose. Validate the ClientMetadata value. Encrypt the ClientMetadata value. Don't send sensitive information in this parameter. public let clientMetadata: [String: String]? /// The session that should be passed both ways in challenge-response calls to the service. If InitiateAuth or RespondToAuthChallenge API call determines that the caller must pass another challenge, they return a session with other challenge parameters. This session should be passed as it is to the next RespondToAuthChallenge API call. public let session: String? /// Contextual data about your user session, such as the device fingerprint, IP address, or location. Amazon Cognito advanced /// security evaluates the risk of an authentication event based on the context that your app generates and passes to Amazon Cognito - /// when it makes API requests. + /// when it makes API requests. For more information, see Collecting data for threat protection in + /// applications. public let userContextData: UserContextDataType? @inlinable @@ -6485,7 +6506,7 @@ extension CognitoIdentityProvider { public let css: String? 
/// The uploaded logo image for the UI customization. public let imageFile: AWSBase64Data? - /// The user pool ID for the user pool. + /// The ID of the user pool. public let userPoolId: String @inlinable @@ -6690,17 +6711,18 @@ extension CognitoIdentityProvider { /// The ID of the client associated with the user pool. public let clientId: String /// A map of custom key-value pairs that you can provide as input for any custom workflows that this action triggers. You create custom workflows by assigning Lambda functions to user pool triggers. When you use the SignUp API action, Amazon Cognito invokes any functions that are assigned to the following triggers: pre sign-up, custom message, and post confirmation. When Amazon Cognito invokes any of these functions, it passes a JSON payload, which the function receives as input. This payload contains a clientMetadata attribute, which provides the data that you assigned to the ClientMetadata parameter in your SignUp request. In your function code in Lambda, you can process the clientMetadata value to enhance your workflow for your specific needs. For more information, see - /// Customizing user pool Workflows with Lambda Triggers in the Amazon Cognito Developer Guide. When you use the ClientMetadata parameter, remember that Amazon Cognito won't do the following: Store the ClientMetadata value. This data is available only to Lambda triggers that are assigned to a user pool to support custom workflows. If your user pool configuration doesn't include triggers, the ClientMetadata parameter serves no purpose. Validate the ClientMetadata value. Encrypt the ClientMetadata value. Don't use Amazon Cognito to provide sensitive information. + /// Customizing user pool Workflows with Lambda Triggers in the Amazon Cognito Developer Guide. When you use the ClientMetadata parameter, note that Amazon Cognito won't do the following: Store the ClientMetadata value. 
This data is available only to Lambda triggers that are assigned to a user pool to support custom workflows. If your user pool configuration doesn't include triggers, the ClientMetadata parameter serves no purpose. Validate the ClientMetadata value. Encrypt the ClientMetadata value. Don't send sensitive information in this parameter. public let clientMetadata: [String: String]? /// The password of the user you want to register. Users can sign up without a password when your user pool supports passwordless sign-in with email or SMS OTPs. To create a user with no password, omit this parameter or submit a blank value. You can only create a passwordless user when passwordless sign-in is available. See the SignInPolicyType property of CreateUserPool and UpdateUserPool. public let password: String? - /// A keyed-hash message authentication code (HMAC) calculated using the secret key of a user pool client and username plus the client ID in the message. + /// A keyed-hash message authentication code (HMAC) calculated using the secret key of a user pool client and username plus the client ID in the message. For more information about SecretHash, see Computing secret hash values. public let secretHash: String? /// An array of name-value pairs representing user attributes. For custom attributes, you must prepend the custom: prefix to the attribute name. public let userAttributes: [AttributeType]? /// Contextual data about your user session, such as the device fingerprint, IP address, or location. Amazon Cognito advanced /// security evaluates the risk of an authentication event based on the context that your app generates and passes to Amazon Cognito - /// when it makes API requests. + /// when it makes API requests. For more information, see Collecting data for threat protection in + /// applications. public let userContextData: UserContextDataType? /// The username of the user that you want to sign up. 
The value of this parameter is typically a username, but can be any alias attribute in your user pool. public let username: String @@ -6876,7 +6898,7 @@ extension CognitoIdentityProvider { public struct StartUserImportJobRequest: AWSEncodableShape { /// The job ID for the user import job. public let jobId: String - /// The user pool ID for the user pool that the users are being imported into. + /// The ID of the user pool that the users are being imported into. public let userPoolId: String @inlinable @@ -6949,7 +6971,7 @@ extension CognitoIdentityProvider { public struct StopUserImportJobRequest: AWSEncodableShape { /// The job ID for the user import job. public let jobId: String - /// The user pool ID for the user pool that the users are being imported into. + /// The ID of the user pool that the users are being imported into. public let userPoolId: String @inlinable @@ -7230,7 +7252,7 @@ extension CognitoIdentityProvider { public let precedence: Int? /// The new role Amazon Resource Name (ARN) for the group. This is used for setting the cognito:roles and cognito:preferred_role claims in the token. public let roleArn: String? - /// The user pool ID for the user pool. + /// The ID of the user pool. public let userPoolId: String @inlinable @@ -7412,7 +7434,7 @@ extension CognitoIdentityProvider { public let name: String /// The scope values to be set for the resource server. public let scopes: [ResourceServerScopeType]? - /// The user pool ID for the user pool. + /// The ID of the user pool. public let userPoolId: String @inlinable @@ -7465,7 +7487,7 @@ extension CognitoIdentityProvider { /// A valid access token that Amazon Cognito issued to the user whose user attributes you want to update. public let accessToken: String /// A map of custom key-value pairs that you can provide as input for any custom workflows that this action initiates. You create custom workflows by assigning Lambda functions to user pool triggers. 
When you use the UpdateUserAttributes API action, Amazon Cognito invokes the function that is assigned to the custom message trigger. When Amazon Cognito invokes this function, it passes a JSON payload, which the function receives as input. This payload contains a clientMetadata attribute, which provides the data that you assigned to the ClientMetadata parameter in your UpdateUserAttributes request. In your function code in Lambda, you can process the clientMetadata value to enhance your workflow for your specific needs. For more information, see - /// Customizing user pool Workflows with Lambda Triggers in the Amazon Cognito Developer Guide. When you use the ClientMetadata parameter, remember that Amazon Cognito won't do the following: Store the ClientMetadata value. This data is available only to Lambda triggers that are assigned to a user pool to support custom workflows. If your user pool configuration doesn't include triggers, the ClientMetadata parameter serves no purpose. Validate the ClientMetadata value. Encrypt the ClientMetadata value. Don't use Amazon Cognito to provide sensitive information. + /// Customizing user pool Workflows with Lambda Triggers in the Amazon Cognito Developer Guide. When you use the ClientMetadata parameter, note that Amazon Cognito won't do the following: Store the ClientMetadata value. This data is available only to Lambda triggers that are assigned to a user pool to support custom workflows. If your user pool configuration doesn't include triggers, the ClientMetadata parameter serves no purpose. Validate the ClientMetadata value. Encrypt the ClientMetadata value. Don't send sensitive information in this parameter. public let clientMetadata: [String: String]? /// An array of name-value pairs representing user attributes. For custom attributes, you must prepend the custom: prefix to the attribute name. 
If you have set an attribute to require verification before Amazon Cognito updates its value, this request doesn’t immediately update the value of that attribute. After your user receives and responds to a verification message to verify the new value, Amazon Cognito updates the attribute value. Your user can sign in and receive messages with the original attribute value until they verify the new value. public let userAttributes: [AttributeType] @@ -7579,11 +7601,11 @@ extension CognitoIdentityProvider { /// in seconds. If you don't specify otherwise in the configuration of your app client, your refresh /// tokens are valid for 30 days. public let refreshTokenValidity: Int? - /// A list of provider names for the identity providers (IdPs) that are supported on this client. The following are supported: COGNITO, Facebook, Google, SignInWithApple, and LoginWithAmazon. You can also specify the names that you configured for the SAML and OIDC IdPs in your user pool, for example MySAMLIdP or MyOIDCIdP. This setting applies to providers that you can access with the hosted UI and OAuth 2.0 authorization server. The removal of COGNITO from this list doesn't prevent authentication operations for local users with the user pools API in an Amazon Web Services SDK. The only way to prevent API-based authentication is to block access with a WAF rule. + /// A list of provider names for the identity providers (IdPs) that are supported on this client. The following are supported: COGNITO, Facebook, Google, SignInWithApple, and LoginWithAmazon. You can also specify the names that you configured for the SAML and OIDC IdPs in your user pool, for example MySAMLIdP or MyOIDCIdP. This setting applies to providers that you can access with managed login. The removal of COGNITO from this list doesn't prevent authentication operations for local users with the user pools API in an Amazon Web Services SDK. The only way to prevent API-based authentication is to block access with a WAF rule. 
public let supportedIdentityProviders: [String]? /// The time units you use when you set the duration of ID, access, and refresh tokens. The default unit for RefreshToken is days, and the default for ID and access tokens is hours. public let tokenValidityUnits: TokenValidityUnitsType? - /// The user pool ID for the user pool where you want to update the user pool client. + /// The ID of the user pool where you want to update the user pool client. public let userPoolId: String /// The list of user attributes that you want your app client to have write access to. After your user authenticates in your app, their access token authorizes them to set or modify their own attribute value for any attribute in this list. An example of this kind of activity is when you present your user with a form to update their profile information and they change their last name. Your app then makes an UpdateUserAttributes API request and sets family_name to the new value. When you don't specify the WriteAttributes for your app client, your app can write the values of the Standard attributes of your user pool. When your user pool has write access to these default attributes, WriteAttributes doesn't return any information. Amazon Cognito only populates WriteAttributes in the API response if you have specified your own custom set of write attributes. If your app client allows users to sign in through an IdP, this array must include all attributes that you have mapped to IdP attributes. Amazon Cognito updates mapped attributes when users sign in to your application through an IdP. If your app client does not have write access to a mapped attribute, Amazon Cognito throws an error when it tries to update the attribute. For more information, see Specifying IdP Attribute Mappings for Your user pool. public let writeAttributes: [String]? 
@@ -7712,7 +7734,7 @@ extension CognitoIdentityProvider { public struct UpdateUserPoolDomainRequest: AWSEncodableShape { /// The configuration for a custom domain that hosts the sign-up and sign-in pages for your application. Use this object to specify an SSL certificate that is managed by ACM. When you create a custom domain, the passkey RP ID defaults to the custom domain. If you had a prefix domain active, this will cause passkey integration for your prefix domain to stop working due to a mismatch in RP ID. To keep the prefix domain passkey integration working, you can explicitly set RP ID to the prefix domain. Update the RP ID in a SetUserPoolMfaConfig request. - public let customDomainConfig: CustomDomainConfigType + public let customDomainConfig: CustomDomainConfigType? /// The domain name for the custom domain that hosts the sign-up and sign-in pages for your application. One example might be auth.example.com. This string can include only lowercase letters, numbers, and hyphens. Don't use a hyphen for the first or last character. Use periods to separate subdomain names. public let domain: String /// A version number that indicates the state of managed login for your domain. Version 1 is hosted UI (classic). Version 2 is the newer managed login with the branding designer. For more information, see Managed login. @@ -7721,7 +7743,7 @@ extension CognitoIdentityProvider { public let userPoolId: String @inlinable - public init(customDomainConfig: CustomDomainConfigType, domain: String, managedLoginVersion: Int? = nil, userPoolId: String) { + public init(customDomainConfig: CustomDomainConfigType? = nil, domain: String, managedLoginVersion: Int? 
= nil, userPoolId: String) { self.customDomainConfig = customDomainConfig self.domain = domain self.managedLoginVersion = managedLoginVersion @@ -7729,7 +7751,7 @@ extension CognitoIdentityProvider { } public func validate(name: String) throws { - try self.customDomainConfig.validate(name: "\(name).customDomainConfig") + try self.customDomainConfig?.validate(name: "\(name).customDomainConfig") try self.validate(self.domain, name: "domain", parent: name, max: 63) try self.validate(self.domain, name: "domain", parent: name, min: 1) try self.validate(self.domain, name: "domain", parent: name, pattern: "^[a-z0-9](?:[a-z0-9\\-]{0,61}[a-z0-9])?$") @@ -7807,7 +7829,7 @@ extension CognitoIdentityProvider { public let userAttributeUpdateSettings: UserAttributeUpdateSettingsType? /// User pool add-ons. Contains settings for activation of advanced security features. To log user security information but take no action, set to AUDIT. To configure automatic security responses to risky traffic to your user pool, set to ENFORCED. For more information, see Adding advanced security to a user pool. public let userPoolAddOns: UserPoolAddOnsType? - /// The user pool ID for the user pool you want to update. + /// The ID of the user pool you want to update. public let userPoolId: String /// The tag keys and values to assign to the user pool. A tag is a label that you can use to categorize and manage user pools in different ways, such as by purpose, owner, environment, or other criteria. public let userPoolTags: [String: String]? @@ -8121,7 +8143,7 @@ extension CognitoIdentityProvider { /// in seconds. If you don't specify otherwise in the configuration of your app client, your refresh /// tokens are valid for 30 days. public let refreshTokenValidity: Int? - /// A list of provider names for the identity providers (IdPs) that are supported on this client. The following are supported: COGNITO, Facebook, Google, SignInWithApple, and LoginWithAmazon. 
You can also specify the names that you configured for the SAML and OIDC IdPs in your user pool, for example MySAMLIdP or MyOIDCIdP. This setting applies to providers that you can access with the hosted UI and OAuth 2.0 authorization server. The removal of COGNITO from this list doesn't prevent authentication operations for local users with the user pools API in an Amazon Web Services SDK. The only way to prevent API-based authentication is to block access with a WAF rule. + /// A list of provider names for the identity providers (IdPs) that are supported on this client. The following are supported: COGNITO, Facebook, Google, SignInWithApple, and LoginWithAmazon. You can also specify the names that you configured for the SAML and OIDC IdPs in your user pool, for example MySAMLIdP or MyOIDCIdP. This setting applies to providers that you can access with managed login. The removal of COGNITO from this list doesn't prevent authentication operations for local users with the user pools API in an Amazon Web Services SDK. The only way to prevent API-based authentication is to block access with a WAF rule. public let supportedIdentityProviders: [String]? /// The time units that, with IdTokenValidity, AccessTokenValidity, and RefreshTokenValidity, set and display the duration of ID, access, and refresh tokens for an app client. You can assign a separate token validity unit to each type of token. public let tokenValidityUnits: TokenValidityUnitsType? @@ -8652,7 +8674,7 @@ extension CognitoIdentityProvider { public struct WebAuthnConfigurationType: AWSEncodableShape & AWSDecodableShape { /// Sets or displays the authentication domain, typically your user pool domain, that passkey providers must use as a relying party (RP) in their configuration. Under the following conditions, the passkey relying party ID must be the fully-qualified domain name of your custom domain: The user pool is configured for passkey authentication. 
The user pool has a custom domain, whether or not it also has a prefix domain. Your application performs authentication with managed login or the classic hosted UI. public let relyingPartyId: String? - /// Sets or displays your user-pool treatment for MFA with a passkey. You can override other MFA options and require passkey MFA, or you can set it as preferred. When passkey MFA is preferred, the hosted UI encourages users to register a passkey at sign-in. + /// When required, users can only register and sign in users with passkeys that are capable of user verification. When preferred, your user pool doesn't require the use of authenticators with user verification but encourages it. public let userVerification: UserVerificationType? @inlinable diff --git a/Sources/Soto/Services/Connect/Connect_api.swift b/Sources/Soto/Services/Connect/Connect_api.swift index b03cae8f16..c38fd127a8 100644 --- a/Sources/Soto/Services/Connect/Connect_api.swift +++ b/Sources/Soto/Services/Connect/Connect_api.swift @@ -1234,6 +1234,53 @@ public struct Connect: AWSService { return try await self.createHoursOfOperation(input, logger: logger) } + /// Creates an hours of operation override in an Amazon Connect hours of operation resource + @Sendable + @inlinable + public func createHoursOfOperationOverride(_ input: CreateHoursOfOperationOverrideRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateHoursOfOperationOverrideResponse { + try await self.client.execute( + operation: "CreateHoursOfOperationOverride", + path: "/hours-of-operations/{InstanceId}/{HoursOfOperationId}/overrides", + httpMethod: .PUT, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Creates an hours of operation override in an Amazon Connect hours of operation resource + /// + /// Parameters: + /// - config: Configuration information for the hours of operation override: day, start time, and end time. + /// - description: The description of the hours of operation override. 
+ /// - effectiveFrom: The date from when the hours of operation override would be effective. + /// - effectiveTill: The date until when the hours of operation override would be effective. + /// - hoursOfOperationId: The identifier for the hours of operation + /// - instanceId: The identifier of the Amazon Connect instance. + /// - name: The name of the hours of operation override. + /// - logger: Logger use during operation + @inlinable + public func createHoursOfOperationOverride( + config: [HoursOfOperationOverrideConfig], + description: String? = nil, + effectiveFrom: String, + effectiveTill: String, + hoursOfOperationId: String, + instanceId: String, + name: String, + logger: Logger = AWSClient.loggingDisabled + ) async throws -> CreateHoursOfOperationOverrideResponse { + let input = CreateHoursOfOperationOverrideRequest( + config: config, + description: description, + effectiveFrom: effectiveFrom, + effectiveTill: effectiveTill, + hoursOfOperationId: hoursOfOperationId, + instanceId: instanceId, + name: name + ) + return try await self.createHoursOfOperationOverride(input, logger: logger) + } + /// This API is in preview release for Amazon Connect and is subject to change. Initiates an Amazon Connect instance with all the supported channels enabled. It does not attach any storage, such as Amazon Simple Storage Service (Amazon S3) or Amazon Kinesis. It also does not allow for any configurations on features, such as Contact Lens for Amazon Connect. For more information, see Create an Amazon Connect instance in the Amazon Connect Administrator Guide. Amazon Connect enforces a limit on the total number of instances that you can create or delete in 30 days. /// If you exceed this limit, you will get an error message indicating there has been an excessive number of attempts at creating or deleting instances. /// You must wait 30 days before you can restart creating and deleting instances in your account. 
@@ -1487,7 +1534,51 @@ public struct Connect: AWSService { return try await self.createPrompt(input, logger: logger) } - /// This API is in preview release for Amazon Connect and is subject to change. Creates a new queue for the specified Amazon Connect instance. If the phone number is claimed to a traffic distribution group that was created in the same Region as the Amazon Connect instance where you are calling this API, then you can use a full phone number ARN or a UUID for OutboundCallerIdNumberId. However, if the phone number is claimed to a traffic distribution group that is in one Region, and you are calling this API from an instance in another Amazon Web Services Region that is associated with the traffic distribution group, you must provide a full phone number ARN. If a UUID is provided in this scenario, you will receive a ResourceNotFoundException. Only use the phone number ARN format that doesn't contain instance in the path, for example, arn:aws:connect:us-east-1:1234567890:phone-number/uuid. This is the same ARN format that is returned when you call the ListPhoneNumbersV2 API. If you plan to use IAM policies to allow/deny access to this API for phone number resources claimed to a traffic distribution group, see Allow or Deny queue API actions for phone numbers in a replica Region. + /// Creates registration for a device token and a chat contact to receive real-time push notifications. For more information about push notifications, see Set up push notifications in Amazon Connect for mobile chat in the Amazon Connect Administrator Guide. 
+ @Sendable + @inlinable + public func createPushNotificationRegistration(_ input: CreatePushNotificationRegistrationRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreatePushNotificationRegistrationResponse { + try await self.client.execute( + operation: "CreatePushNotificationRegistration", + path: "/push-notification/{InstanceId}/registrations", + httpMethod: .PUT, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Creates registration for a device token and a chat contact to receive real-time push notifications. For more information about push notifications, see Set up push notifications in Amazon Connect for mobile chat in the Amazon Connect Administrator Guide. + /// + /// Parameters: + /// - clientToken: A unique, case-sensitive identifier that you provide to ensure the idempotency of the request. If not provided, the Amazon Web Services SDK populates this field. For more information about idempotency, see Making retries safe with idempotent APIs. + /// - contactConfiguration: The contact configuration for push notification registration. + /// - deviceToken: The push notification token issued by the Apple or Google gateways. + /// - deviceType: The device type to use when sending the message. + /// - instanceId: The identifier of the Amazon Connect instance. You can find the instance ID in the Amazon Resource Name (ARN) of the instance. + /// - pinpointAppArn: The Amazon Resource Name (ARN) of the Pinpoint application. + /// - logger: Logger use during operation + @inlinable + public func createPushNotificationRegistration( + clientToken: String? 
= CreatePushNotificationRegistrationRequest.idempotencyToken(), + contactConfiguration: ContactConfiguration, + deviceToken: String, + deviceType: DeviceType, + instanceId: String, + pinpointAppArn: String, + logger: Logger = AWSClient.loggingDisabled + ) async throws -> CreatePushNotificationRegistrationResponse { + let input = CreatePushNotificationRegistrationRequest( + clientToken: clientToken, + contactConfiguration: contactConfiguration, + deviceToken: deviceToken, + deviceType: deviceType, + instanceId: instanceId, + pinpointAppArn: pinpointAppArn + ) + return try await self.createPushNotificationRegistration(input, logger: logger) + } + + /// Creates a new queue for the specified Amazon Connect instance. If the phone number is claimed to a traffic distribution group that was created in the same Region as the Amazon Connect instance where you are calling this API, then you can use a full phone number ARN or a UUID for OutboundCallerIdNumberId. However, if the phone number is claimed to a traffic distribution group that is in one Region, and you are calling this API from an instance in another Amazon Web Services Region that is associated with the traffic distribution group, you must provide a full phone number ARN. If a UUID is provided in this scenario, you will receive a ResourceNotFoundException. Only use the phone number ARN format that doesn't contain instance in the path, for example, arn:aws:connect:us-east-1:1234567890:phone-number/uuid. This is the same ARN format that is returned when you call the ListPhoneNumbersV2 API. If you plan to use IAM policies to allow/deny access to this API for phone number resources claimed to a traffic distribution group, see Allow or Deny queue API actions for phone numbers in a replica Region. 
@Sendable @inlinable public func createQueue(_ input: CreateQueueRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateQueueResponse { @@ -1500,7 +1591,7 @@ public struct Connect: AWSService { logger: logger ) } - /// This API is in preview release for Amazon Connect and is subject to change. Creates a new queue for the specified Amazon Connect instance. If the phone number is claimed to a traffic distribution group that was created in the same Region as the Amazon Connect instance where you are calling this API, then you can use a full phone number ARN or a UUID for OutboundCallerIdNumberId. However, if the phone number is claimed to a traffic distribution group that is in one Region, and you are calling this API from an instance in another Amazon Web Services Region that is associated with the traffic distribution group, you must provide a full phone number ARN. If a UUID is provided in this scenario, you will receive a ResourceNotFoundException. Only use the phone number ARN format that doesn't contain instance in the path, for example, arn:aws:connect:us-east-1:1234567890:phone-number/uuid. This is the same ARN format that is returned when you call the ListPhoneNumbersV2 API. If you plan to use IAM policies to allow/deny access to this API for phone number resources claimed to a traffic distribution group, see Allow or Deny queue API actions for phone numbers in a replica Region. + /// Creates a new queue for the specified Amazon Connect instance. If the phone number is claimed to a traffic distribution group that was created in the same Region as the Amazon Connect instance where you are calling this API, then you can use a full phone number ARN or a UUID for OutboundCallerIdNumberId. 
However, if the phone number is claimed to a traffic distribution group that is in one Region, and you are calling this API from an instance in another Amazon Web Services Region that is associated with the traffic distribution group, you must provide a full phone number ARN. If a UUID is provided in this scenario, you will receive a ResourceNotFoundException. Only use the phone number ARN format that doesn't contain instance in the path, for example, arn:aws:connect:us-east-1:1234567890:phone-number/uuid. This is the same ARN format that is returned when you call the ListPhoneNumbersV2 API. If you plan to use IAM policies to allow/deny access to this API for phone number resources claimed to a traffic distribution group, see Allow or Deny queue API actions for phone numbers in a replica Region. /// /// Parameters: /// - description: The description of the queue. @@ -2361,6 +2452,41 @@ public struct Connect: AWSService { return try await self.deleteHoursOfOperation(input, logger: logger) } + /// Deletes an hours of operation override in an Amazon Connect hours of operation resource + @Sendable + @inlinable + public func deleteHoursOfOperationOverride(_ input: DeleteHoursOfOperationOverrideRequest, logger: Logger = AWSClient.loggingDisabled) async throws { + try await self.client.execute( + operation: "DeleteHoursOfOperationOverride", + path: "/hours-of-operations/{InstanceId}/{HoursOfOperationId}/overrides/{HoursOfOperationOverrideId}", + httpMethod: .DELETE, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Deletes an hours of operation override in an Amazon Connect hours of operation resource + /// + /// Parameters: + /// - hoursOfOperationId: The identifier for the hours of operation. + /// - hoursOfOperationOverrideId: The identifier for the hours of operation override. + /// - instanceId: The identifier of the Amazon Connect instance. 
+ /// - logger: Logger use during operation + @inlinable + public func deleteHoursOfOperationOverride( + hoursOfOperationId: String, + hoursOfOperationOverrideId: String, + instanceId: String, + logger: Logger = AWSClient.loggingDisabled + ) async throws { + let input = DeleteHoursOfOperationOverrideRequest( + hoursOfOperationId: hoursOfOperationId, + hoursOfOperationOverrideId: hoursOfOperationOverrideId, + instanceId: instanceId + ) + return try await self.deleteHoursOfOperationOverride(input, logger: logger) + } + /// This API is in preview release for Amazon Connect and is subject to change. Deletes the Amazon Connect instance. For more information, see Delete your Amazon Connect instance in the Amazon Connect Administrator Guide. Amazon Connect enforces a limit on the total number of instances that you can create or delete in 30 days. /// If you exceed this limit, you will get an error message indicating there has been an excessive number of attempts at creating or deleting instances. /// You must wait 30 days before you can restart creating and deleting instances in your account. @@ -2490,6 +2616,41 @@ public struct Connect: AWSService { return try await self.deletePrompt(input, logger: logger) } + /// Deletes registration for a device token and a chat contact. + @Sendable + @inlinable + public func deletePushNotificationRegistration(_ input: DeletePushNotificationRegistrationRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DeletePushNotificationRegistrationResponse { + try await self.client.execute( + operation: "DeletePushNotificationRegistration", + path: "/push-notification/{InstanceId}/registrations/{RegistrationId}", + httpMethod: .DELETE, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Deletes registration for a device token and a chat contact. + /// + /// Parameters: + /// - contactId: The identifier of the contact within the Amazon Connect instance. 
+ /// - instanceId: The identifier of the Amazon Connect instance. You can find the instance ID in the Amazon Resource Name (ARN) of the instance. + /// - registrationId: The identifier for the registration. + /// - logger: Logger use during operation + @inlinable + public func deletePushNotificationRegistration( + contactId: String, + instanceId: String, + registrationId: String, + logger: Logger = AWSClient.loggingDisabled + ) async throws -> DeletePushNotificationRegistrationResponse { + let input = DeletePushNotificationRegistrationRequest( + contactId: contactId, + instanceId: instanceId, + registrationId: registrationId + ) + return try await self.deletePushNotificationRegistration(input, logger: logger) + } + /// Deletes a queue. It isn't possible to delete a queue by using the Amazon Connect admin website. @Sendable @inlinable @@ -3202,6 +3363,41 @@ public struct Connect: AWSService { return try await self.describeHoursOfOperation(input, logger: logger) } + /// Describes the hours of operation override. + @Sendable + @inlinable + public func describeHoursOfOperationOverride(_ input: DescribeHoursOfOperationOverrideRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DescribeHoursOfOperationOverrideResponse { + try await self.client.execute( + operation: "DescribeHoursOfOperationOverride", + path: "/hours-of-operations/{InstanceId}/{HoursOfOperationId}/overrides/{HoursOfOperationOverrideId}", + httpMethod: .GET, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Describes the hours of operation override. + /// + /// Parameters: + /// - hoursOfOperationId: The identifier for the hours of operation. + /// - hoursOfOperationOverrideId: The identifier for the hours of operation override. + /// - instanceId: The identifier of the Amazon Connect instance. 
+ /// - logger: Logger use during operation + @inlinable + public func describeHoursOfOperationOverride( + hoursOfOperationId: String, + hoursOfOperationOverrideId: String, + instanceId: String, + logger: Logger = AWSClient.loggingDisabled + ) async throws -> DescribeHoursOfOperationOverrideResponse { + let input = DescribeHoursOfOperationOverrideRequest( + hoursOfOperationId: hoursOfOperationId, + hoursOfOperationOverrideId: hoursOfOperationOverrideId, + instanceId: instanceId + ) + return try await self.describeHoursOfOperationOverride(input, logger: logger) + } + /// This API is in preview release for Amazon Connect and is subject to change. Returns the current state of the specified instance identifier. It tracks the instance while it is being created and returns an error status, if applicable. If an instance is not created successfully, the instance status reason field returns details relevant to the reason. The instance in a failed state is returned only for 24 hours after the CreateInstance API was invoked. @Sendable @inlinable @@ -4370,6 +4566,44 @@ public struct Connect: AWSService { return try await self.getCurrentUserData(input, logger: logger) } + /// Get the hours of operations with the effective override applied. + @Sendable + @inlinable + public func getEffectiveHoursOfOperations(_ input: GetEffectiveHoursOfOperationsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> GetEffectiveHoursOfOperationsResponse { + try await self.client.execute( + operation: "GetEffectiveHoursOfOperations", + path: "/effective-hours-of-operations/{InstanceId}/{HoursOfOperationId}", + httpMethod: .GET, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Get the hours of operations with the effective override applied. + /// + /// Parameters: + /// - fromDate: The Date from when the hours of operation are listed. + /// - hoursOfOperationId: The identifier for the hours of operation. 
+ /// - instanceId: The identifier of the Amazon Connect instance. + /// - toDate: The Date until when the hours of operation are listed. + /// - logger: Logger use during operation + @inlinable + public func getEffectiveHoursOfOperations( + fromDate: String, + hoursOfOperationId: String, + instanceId: String, + toDate: String, + logger: Logger = AWSClient.loggingDisabled + ) async throws -> GetEffectiveHoursOfOperationsResponse { + let input = GetEffectiveHoursOfOperationsRequest( + fromDate: fromDate, + hoursOfOperationId: hoursOfOperationId, + instanceId: instanceId, + toDate: toDate + ) + return try await self.getEffectiveHoursOfOperations(input, logger: logger) + } + /// Supports SAML sign-in for Amazon Connect. Retrieves a token for federation. The token is for the Amazon Connect user which corresponds to the IAM credentials that were used to invoke this action. For more information about how SAML sign-in works in Amazon Connect, see Configure SAML with IAM for Amazon Connect in the Amazon Connect Administrator Guide. This API doesn't support root users. If you try to invoke GetFederationToken with root credentials, an error message similar to the following one appears: Provided identity: Principal: .... User: .... cannot be used for federation with Amazon Connect @Sendable @inlinable @@ -5234,6 +5468,44 @@ public struct Connect: AWSService { return try await self.listFlowAssociations(input, logger: logger) } + /// List the hours of operation overrides. + @Sendable + @inlinable + public func listHoursOfOperationOverrides(_ input: ListHoursOfOperationOverridesRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListHoursOfOperationOverridesResponse { + try await self.client.execute( + operation: "ListHoursOfOperationOverrides", + path: "/hours-of-operations/{InstanceId}/{HoursOfOperationId}/overrides", + httpMethod: .GET, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// List the hours of operation overrides. 
+ /// + /// Parameters: + /// - hoursOfOperationId: The identifier for the hours of operation + /// - instanceId: The identifier of the Amazon Connect instance. + /// - maxResults: The maximum number of results to return per page. The default MaxResult size is 100. Valid Range: Minimum value of 1. Maximum value of 1000. + /// - nextToken: The token for the next set of results. Use the value returned in the previous response in the next request to retrieve the next set of results. + /// - logger: Logger use during operation + @inlinable + public func listHoursOfOperationOverrides( + hoursOfOperationId: String, + instanceId: String, + maxResults: Int? = nil, + nextToken: String? = nil, + logger: Logger = AWSClient.loggingDisabled + ) async throws -> ListHoursOfOperationOverridesResponse { + let input = ListHoursOfOperationOverridesRequest( + hoursOfOperationId: hoursOfOperationId, + instanceId: instanceId, + maxResults: maxResults, + nextToken: nextToken + ) + return try await self.listHoursOfOperationOverrides(input, logger: logger) + } + /// Provides information about the hours of operation for the specified Amazon Connect instance. For more information about hours of operation, see Set the Hours of Operation for a Queue in the Amazon Connect Administrator Guide. @Sendable @inlinable @@ -6932,6 +7204,47 @@ public struct Connect: AWSService { return try await self.searchEmailAddresses(input, logger: logger) } + /// Searches the hours of operation overrides. + @Sendable + @inlinable + public func searchHoursOfOperationOverrides(_ input: SearchHoursOfOperationOverridesRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> SearchHoursOfOperationOverridesResponse { + try await self.client.execute( + operation: "SearchHoursOfOperationOverrides", + path: "/search-hours-of-operation-overrides", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Searches the hours of operation overrides. 
+ /// + /// Parameters: + /// - instanceId: The identifier of the Amazon Connect instance. + /// - maxResults: The maximum number of results to return per page. Valid Range: Minimum value of 1. Maximum value of 100. + /// - nextToken: The token for the next set of results. Use the value returned in the previous response in the next request to retrieve the next set of results. Length Constraints: Minimum length of 1. Maximum length of 2500. + /// - searchCriteria: The search criteria to be used to return hours of operations overrides. + /// - searchFilter: + /// - logger: Logger use during operation + @inlinable + public func searchHoursOfOperationOverrides( + instanceId: String, + maxResults: Int? = nil, + nextToken: String? = nil, + searchCriteria: HoursOfOperationOverrideSearchCriteria? = nil, + searchFilter: HoursOfOperationSearchFilter? = nil, + logger: Logger = AWSClient.loggingDisabled + ) async throws -> SearchHoursOfOperationOverridesResponse { + let input = SearchHoursOfOperationOverridesRequest( + instanceId: instanceId, + maxResults: maxResults, + nextToken: nextToken, + searchCriteria: searchCriteria, + searchFilter: searchFilter + ) + return try await self.searchHoursOfOperationOverrides(input, logger: logger) + } + /// Searches the hours of operation in an Amazon Connect instance, with optional filtering. @Sendable @inlinable @@ -9089,6 +9402,56 @@ public struct Connect: AWSService { return try await self.updateHoursOfOperation(input, logger: logger) } + /// Update the hours of operation override. 
+ @Sendable + @inlinable + public func updateHoursOfOperationOverride(_ input: UpdateHoursOfOperationOverrideRequest, logger: Logger = AWSClient.loggingDisabled) async throws { + try await self.client.execute( + operation: "UpdateHoursOfOperationOverride", + path: "/hours-of-operations/{InstanceId}/{HoursOfOperationId}/overrides/{HoursOfOperationOverrideId}", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Update the hours of operation override. + /// + /// Parameters: + /// - config: Configuration information for the hours of operation override: day, start time, and end time. + /// - description: The description of the hours of operation override. + /// - effectiveFrom: The date from when the hours of operation override would be effective. + /// - effectiveTill: The date till when the hours of operation override would be effective. + /// - hoursOfOperationId: The identifier for the hours of operation. + /// - hoursOfOperationOverrideId: The identifier for the hours of operation override. + /// - instanceId: The identifier of the Amazon Connect instance. + /// - name: The name of the hours of operation override. + /// - logger: Logger use during operation + @inlinable + public func updateHoursOfOperationOverride( + config: [HoursOfOperationOverrideConfig]? = nil, + description: String? = nil, + effectiveFrom: String? = nil, + effectiveTill: String? = nil, + hoursOfOperationId: String, + hoursOfOperationOverrideId: String, + instanceId: String, + name: String? 
= nil, + logger: Logger = AWSClient.loggingDisabled + ) async throws { + let input = UpdateHoursOfOperationOverrideRequest( + config: config, + description: description, + effectiveFrom: effectiveFrom, + effectiveTill: effectiveTill, + hoursOfOperationId: hoursOfOperationId, + hoursOfOperationOverrideId: hoursOfOperationOverrideId, + instanceId: instanceId, + name: name + ) + return try await self.updateHoursOfOperationOverride(input, logger: logger) + } + /// This API is in preview release for Amazon Connect and is subject to change. Updates the value for the specified attribute type. @Sendable @inlinable @@ -11076,6 +11439,46 @@ extension Connect { return self.listFlowAssociationsPaginator(input, logger: logger) } + /// Return PaginatorSequence for operation ``listHoursOfOperationOverrides(_:logger:)``. + /// + /// - Parameters: + /// - input: Input for operation + /// - logger: Logger used for logging + @inlinable + public func listHoursOfOperationOverridesPaginator( + _ input: ListHoursOfOperationOverridesRequest, + logger: Logger = AWSClient.loggingDisabled + ) -> AWSClient.PaginatorSequence { + return .init( + input: input, + command: self.listHoursOfOperationOverrides, + inputKey: \ListHoursOfOperationOverridesRequest.nextToken, + outputKey: \ListHoursOfOperationOverridesResponse.nextToken, + logger: logger + ) + } + /// Return PaginatorSequence for operation ``listHoursOfOperationOverrides(_:logger:)``. + /// + /// - Parameters: + /// - hoursOfOperationId: The identifier for the hours of operation + /// - instanceId: The identifier of the Amazon Connect instance. + /// - maxResults: The maximum number of results to return per page. The default MaxResult size is 100. Valid Range: Minimum value of 1. Maximum value of 1000. + /// - logger: Logger used for logging + @inlinable + public func listHoursOfOperationOverridesPaginator( + hoursOfOperationId: String, + instanceId: String, + maxResults: Int? 
= nil, + logger: Logger = AWSClient.loggingDisabled + ) -> AWSClient.PaginatorSequence { + let input = ListHoursOfOperationOverridesRequest( + hoursOfOperationId: hoursOfOperationId, + instanceId: instanceId, + maxResults: maxResults + ) + return self.listHoursOfOperationOverridesPaginator(input, logger: logger) + } + /// Return PaginatorSequence for operation ``listHoursOfOperations(_:logger:)``. /// /// - Parameters: @@ -12522,6 +12925,49 @@ extension Connect { return self.searchContactsPaginator(input, logger: logger) } + /// Return PaginatorSequence for operation ``searchHoursOfOperationOverrides(_:logger:)``. + /// + /// - Parameters: + /// - input: Input for operation + /// - logger: Logger used for logging + @inlinable + public func searchHoursOfOperationOverridesPaginator( + _ input: SearchHoursOfOperationOverridesRequest, + logger: Logger = AWSClient.loggingDisabled + ) -> AWSClient.PaginatorSequence { + return .init( + input: input, + command: self.searchHoursOfOperationOverrides, + inputKey: \SearchHoursOfOperationOverridesRequest.nextToken, + outputKey: \SearchHoursOfOperationOverridesResponse.nextToken, + logger: logger + ) + } + /// Return PaginatorSequence for operation ``searchHoursOfOperationOverrides(_:logger:)``. + /// + /// - Parameters: + /// - instanceId: The identifier of the Amazon Connect instance. + /// - maxResults: The maximum number of results to return per page. Valid Range: Minimum value of 1. Maximum value of 100. + /// - searchCriteria: The search criteria to be used to return hours of operations overrides. + /// - searchFilter: + /// - logger: Logger used for logging + @inlinable + public func searchHoursOfOperationOverridesPaginator( + instanceId: String, + maxResults: Int? = nil, + searchCriteria: HoursOfOperationOverrideSearchCriteria? = nil, + searchFilter: HoursOfOperationSearchFilter? 
= nil, + logger: Logger = AWSClient.loggingDisabled + ) -> AWSClient.PaginatorSequence { + let input = SearchHoursOfOperationOverridesRequest( + instanceId: instanceId, + maxResults: maxResults, + searchCriteria: searchCriteria, + searchFilter: searchFilter + ) + return self.searchHoursOfOperationOverridesPaginator(input, logger: logger) + } + /// Return PaginatorSequence for operation ``searchHoursOfOperations(_:logger:)``. /// /// - Parameters: @@ -13208,6 +13654,18 @@ extension Connect.ListFlowAssociationsRequest: AWSPaginateToken { } } +extension Connect.ListHoursOfOperationOverridesRequest: AWSPaginateToken { + @inlinable + public func usingPaginationToken(_ token: String) -> Connect.ListHoursOfOperationOverridesRequest { + return .init( + hoursOfOperationId: self.hoursOfOperationId, + instanceId: self.instanceId, + maxResults: self.maxResults, + nextToken: token + ) + } +} + extension Connect.ListHoursOfOperationsRequest: AWSPaginateToken { @inlinable public func usingPaginationToken(_ token: String) -> Connect.ListHoursOfOperationsRequest { @@ -13642,6 +14100,19 @@ extension Connect.SearchContactsRequest: AWSPaginateToken { } } +extension Connect.SearchHoursOfOperationOverridesRequest: AWSPaginateToken { + @inlinable + public func usingPaginationToken(_ token: String) -> Connect.SearchHoursOfOperationOverridesRequest { + return .init( + instanceId: self.instanceId, + maxResults: self.maxResults, + nextToken: token, + searchCriteria: self.searchCriteria, + searchFilter: self.searchFilter + ) + } +} + extension Connect.SearchHoursOfOperationsRequest: AWSPaginateToken { @inlinable public func usingPaginationToken(_ token: String) -> Connect.SearchHoursOfOperationsRequest { diff --git a/Sources/Soto/Services/Connect/Connect_shapes.swift b/Sources/Soto/Services/Connect/Connect_shapes.swift index 2fdc5294a3..7067d5be71 100644 --- a/Sources/Soto/Services/Connect/Connect_shapes.swift +++ b/Sources/Soto/Services/Connect/Connect_shapes.swift @@ -198,6 +198,22 @@ 
extension Connect { public var description: String { return self.rawValue } } + public enum DateComparisonType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case equalTo = "EQUAL_TO" + case greaterThan = "GREATER_THAN" + case greaterThanOrEqualTo = "GREATER_THAN_OR_EQUAL_TO" + case lessThan = "LESS_THAN" + case lessThanOrEqualTo = "LESS_THAN_OR_EQUAL_TO" + public var description: String { return self.rawValue } + } + + public enum DeviceType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case apns = "APNS" + case apnsSandbox = "APNS_SANDBOX" + case gcm = "GCM" + public var description: String { return self.rawValue } + } + public enum DirectoryType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case connectManaged = "CONNECT_MANAGED" case existingDirectory = "EXISTING_DIRECTORY" @@ -528,6 +544,17 @@ extension Connect { public var description: String { return self.rawValue } } + public enum OverrideDays: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case friday = "FRIDAY" + case monday = "MONDAY" + case saturday = "SATURDAY" + case sunday = "SUNDAY" + case thursday = "THURSDAY" + case tuesday = "TUESDAY" + case wednesday = "WEDNESDAY" + public var description: String { return self.rawValue } + } + public enum ParticipantRole: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case agent = "AGENT" case customBot = "CUSTOM_BOT" @@ -3687,6 +3714,33 @@ extension Connect { } } + public struct ContactConfiguration: AWSEncodableShape { + /// The identifier of the contact within the Amazon Connect instance. + public let contactId: String + /// Whether to include raw connect message in the push notification payload. Default is False. + public let includeRawMessage: Bool? + /// The role of the participant in the chat conversation. Only CUSTOMER is currently supported. 
Any other values other than CUSTOMER will result in an exception (4xx error). + public let participantRole: ParticipantRole? + + @inlinable + public init(contactId: String, includeRawMessage: Bool? = nil, participantRole: ParticipantRole? = nil) { + self.contactId = contactId + self.includeRawMessage = includeRawMessage + self.participantRole = participantRole + } + + public func validate(name: String) throws { + try self.validate(self.contactId, name: "contactId", parent: name, max: 256) + try self.validate(self.contactId, name: "contactId", parent: name, min: 1) + } + + private enum CodingKeys: String, CodingKey { + case contactId = "ContactId" + case includeRawMessage = "IncludeRawMessage" + case participantRole = "ParticipantRole" + } + } + public struct ContactDataRequest: AWSEncodableShape { /// List of attributes to be stored in a contact. public let attributes: [String: String]? @@ -3864,18 +3918,26 @@ extension Connect { public let andConditions: [ContactFlowModuleSearchCriteria]? /// A list of conditions which would be applied together with an OR condition. public let orConditions: [ContactFlowModuleSearchCriteria]? + /// The state of the flow. + public let stateCondition: ContactFlowModuleState? + /// The status of the flow. + public let statusCondition: ContactFlowModuleStatus? public let stringCondition: StringCondition? @inlinable - public init(andConditions: [ContactFlowModuleSearchCriteria]? = nil, orConditions: [ContactFlowModuleSearchCriteria]? = nil, stringCondition: StringCondition? = nil) { + public init(andConditions: [ContactFlowModuleSearchCriteria]? = nil, orConditions: [ContactFlowModuleSearchCriteria]? = nil, stateCondition: ContactFlowModuleState? = nil, statusCondition: ContactFlowModuleStatus? = nil, stringCondition: StringCondition? 
= nil) { self.andConditions = andConditions self.orConditions = orConditions + self.stateCondition = stateCondition + self.statusCondition = statusCondition self.stringCondition = stringCondition } private enum CodingKeys: String, CodingKey { case andConditions = "AndConditions" case orConditions = "OrConditions" + case stateCondition = "StateCondition" + case statusCondition = "StatusCondition" case stringCondition = "StringCondition" } } @@ -4804,6 +4866,81 @@ extension Connect { } } + public struct CreateHoursOfOperationOverrideRequest: AWSEncodableShape { + /// Configuration information for the hours of operation override: day, start time, and end time. + public let config: [HoursOfOperationOverrideConfig] + /// The description of the hours of operation override. + public let description: String? + /// The date from when the hours of operation override would be effective. + public let effectiveFrom: String + /// The date until when the hours of operation override would be effective. + public let effectiveTill: String + /// The identifier for the hours of operation + public let hoursOfOperationId: String + /// The identifier of the Amazon Connect instance. + public let instanceId: String + /// The name of the hours of operation override. + public let name: String + + @inlinable + public init(config: [HoursOfOperationOverrideConfig], description: String? = nil, effectiveFrom: String, effectiveTill: String, hoursOfOperationId: String, instanceId: String, name: String) { + self.config = config + self.description = description + self.effectiveFrom = effectiveFrom + self.effectiveTill = effectiveTill + self.hoursOfOperationId = hoursOfOperationId + self.instanceId = instanceId + self.name = name + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! 
RequestEncodingContainer + var container = encoder.container(keyedBy: CodingKeys.self) + try container.encode(self.config, forKey: .config) + try container.encodeIfPresent(self.description, forKey: .description) + try container.encode(self.effectiveFrom, forKey: .effectiveFrom) + try container.encode(self.effectiveTill, forKey: .effectiveTill) + request.encodePath(self.hoursOfOperationId, key: "HoursOfOperationId") + request.encodePath(self.instanceId, key: "InstanceId") + try container.encode(self.name, forKey: .name) + } + + public func validate(name: String) throws { + try self.config.forEach { + try $0.validate(name: "\(name).config[]") + } + try self.validate(self.config, name: "config", parent: name, max: 100) + try self.validate(self.description, name: "description", parent: name, pattern: "^[\\P{C}\\r\\n\\t]{1,250}$") + try self.validate(self.effectiveFrom, name: "effectiveFrom", parent: name, pattern: "^\\d{4}-\\d{2}-\\d{2}$") + try self.validate(self.effectiveTill, name: "effectiveTill", parent: name, pattern: "^\\d{4}-\\d{2}-\\d{2}$") + try self.validate(self.instanceId, name: "instanceId", parent: name, max: 100) + try self.validate(self.instanceId, name: "instanceId", parent: name, min: 1) + try self.validate(self.name, name: "name", parent: name, pattern: "^[\\P{C}\\r\\n\\t]{1,127}$") + } + + private enum CodingKeys: String, CodingKey { + case config = "Config" + case description = "Description" + case effectiveFrom = "EffectiveFrom" + case effectiveTill = "EffectiveTill" + case name = "Name" + } + } + + public struct CreateHoursOfOperationOverrideResponse: AWSDecodableShape { + /// The identifier for the hours of operation override. + public let hoursOfOperationOverrideId: String? + + @inlinable + public init(hoursOfOperationOverrideId: String? 
= nil) { + self.hoursOfOperationOverrideId = hoursOfOperationOverrideId + } + + private enum CodingKeys: String, CodingKey { + case hoursOfOperationOverrideId = "HoursOfOperationOverrideId" + } + } + public struct CreateHoursOfOperationRequest: AWSEncodableShape { /// Configuration information for the hours of operation: day, start time, and end time. public let config: [HoursOfOperationConfig] @@ -5276,6 +5413,73 @@ extension Connect { } } + public struct CreatePushNotificationRegistrationRequest: AWSEncodableShape { + /// A unique, case-sensitive identifier that you provide to ensure the idempotency of the request. If not provided, the Amazon Web Services SDK populates this field. For more information about idempotency, see Making retries safe with idempotent APIs. + public let clientToken: String? + /// The contact configuration for push notification registration. + public let contactConfiguration: ContactConfiguration + /// The push notification token issued by the Apple or Google gateways. + public let deviceToken: String + /// The device type to use when sending the message. + public let deviceType: DeviceType + /// The identifier of the Amazon Connect instance. You can find the instance ID in the Amazon Resource Name (ARN) of the instance. + public let instanceId: String + /// The Amazon Resource Name (ARN) of the Pinpoint application. + public let pinpointAppArn: String + + @inlinable + public init(clientToken: String? = CreatePushNotificationRegistrationRequest.idempotencyToken(), contactConfiguration: ContactConfiguration, deviceToken: String, deviceType: DeviceType, instanceId: String, pinpointAppArn: String) { + self.clientToken = clientToken + self.contactConfiguration = contactConfiguration + self.deviceToken = deviceToken + self.deviceType = deviceType + self.instanceId = instanceId + self.pinpointAppArn = pinpointAppArn + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! 
RequestEncodingContainer + var container = encoder.container(keyedBy: CodingKeys.self) + try container.encodeIfPresent(self.clientToken, forKey: .clientToken) + try container.encode(self.contactConfiguration, forKey: .contactConfiguration) + try container.encode(self.deviceToken, forKey: .deviceToken) + try container.encode(self.deviceType, forKey: .deviceType) + request.encodePath(self.instanceId, key: "InstanceId") + try container.encode(self.pinpointAppArn, forKey: .pinpointAppArn) + } + + public func validate(name: String) throws { + try self.validate(self.clientToken, name: "clientToken", parent: name, max: 500) + try self.contactConfiguration.validate(name: "\(name).contactConfiguration") + try self.validate(self.deviceToken, name: "deviceToken", parent: name, max: 500) + try self.validate(self.deviceToken, name: "deviceToken", parent: name, min: 1) + try self.validate(self.instanceId, name: "instanceId", parent: name, max: 100) + try self.validate(self.instanceId, name: "instanceId", parent: name, min: 1) + } + + private enum CodingKeys: String, CodingKey { + case clientToken = "ClientToken" + case contactConfiguration = "ContactConfiguration" + case deviceToken = "DeviceToken" + case deviceType = "DeviceType" + case pinpointAppArn = "PinpointAppArn" + } + } + + public struct CreatePushNotificationRegistrationResponse: AWSDecodableShape { + /// The identifier for the registration. + public let registrationId: String + + @inlinable + public init(registrationId: String) { + self.registrationId = registrationId + } + + private enum CodingKeys: String, CodingKey { + case registrationId = "RegistrationId" + } + } + public struct CreateQueueRequest: AWSEncodableShape { /// The description of the queue. public let description: String? @@ -6548,6 +6752,32 @@ extension Connect { } } + public struct DateCondition: AWSEncodableShape { + /// An object to specify the hours of operation override date condition comparisonType. 
+ public let comparisonType: DateComparisonType? + /// An object to specify the hours of operation override date field. + public let fieldName: String? + /// An object to specify the hours of operation override date value. + public let value: String? + + @inlinable + public init(comparisonType: DateComparisonType? = nil, fieldName: String? = nil, value: String? = nil) { + self.comparisonType = comparisonType + self.fieldName = fieldName + self.value = value + } + + public func validate(name: String) throws { + try self.validate(self.value, name: "value", parent: name, pattern: "^\\d{4}-\\d{2}-\\d{2}$") + } + + private enum CodingKeys: String, CodingKey { + case comparisonType = "ComparisonType" + case fieldName = "FieldName" + case value = "Value" + } + } + public struct DateReference: AWSDecodableShape { /// Identifier of the date reference. public let name: String? @@ -6849,6 +7079,39 @@ extension Connect { private enum CodingKeys: CodingKey {} } + public struct DeleteHoursOfOperationOverrideRequest: AWSEncodableShape { + /// The identifier for the hours of operation. + public let hoursOfOperationId: String + /// The identifier for the hours of operation override. + public let hoursOfOperationOverrideId: String + /// The identifier of the Amazon Connect instance. + public let instanceId: String + + @inlinable + public init(hoursOfOperationId: String, hoursOfOperationOverrideId: String, instanceId: String) { + self.hoursOfOperationId = hoursOfOperationId + self.hoursOfOperationOverrideId = hoursOfOperationOverrideId + self.instanceId = instanceId + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! 
RequestEncodingContainer + _ = encoder.container(keyedBy: CodingKeys.self) + request.encodePath(self.hoursOfOperationId, key: "HoursOfOperationId") + request.encodePath(self.hoursOfOperationOverrideId, key: "HoursOfOperationOverrideId") + request.encodePath(self.instanceId, key: "InstanceId") + } + + public func validate(name: String) throws { + try self.validate(self.hoursOfOperationOverrideId, name: "hoursOfOperationOverrideId", parent: name, max: 36) + try self.validate(self.hoursOfOperationOverrideId, name: "hoursOfOperationOverrideId", parent: name, min: 1) + try self.validate(self.instanceId, name: "instanceId", parent: name, max: 100) + try self.validate(self.instanceId, name: "instanceId", parent: name, min: 1) + } + + private enum CodingKeys: CodingKey {} + } + public struct DeleteHoursOfOperationRequest: AWSEncodableShape { /// The identifier for the hours of operation. public let hoursOfOperationId: String @@ -6986,6 +7249,45 @@ extension Connect { private enum CodingKeys: CodingKey {} } + public struct DeletePushNotificationRegistrationRequest: AWSEncodableShape { + /// The identifier of the contact within the Amazon Connect instance. + public let contactId: String + /// The identifier of the Amazon Connect instance. You can find the instance ID in the Amazon Resource Name (ARN) of the instance. + public let instanceId: String + /// The identifier for the registration. + public let registrationId: String + + @inlinable + public init(contactId: String, instanceId: String, registrationId: String) { + self.contactId = contactId + self.instanceId = instanceId + self.registrationId = registrationId + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! 
RequestEncodingContainer + _ = encoder.container(keyedBy: CodingKeys.self) + request.encodeQuery(self.contactId, key: "contactId") + request.encodePath(self.instanceId, key: "InstanceId") + request.encodePath(self.registrationId, key: "RegistrationId") + } + + public func validate(name: String) throws { + try self.validate(self.contactId, name: "contactId", parent: name, max: 256) + try self.validate(self.contactId, name: "contactId", parent: name, min: 1) + try self.validate(self.instanceId, name: "instanceId", parent: name, max: 100) + try self.validate(self.instanceId, name: "instanceId", parent: name, min: 1) + try self.validate(self.registrationId, name: "registrationId", parent: name, max: 256) + try self.validate(self.registrationId, name: "registrationId", parent: name, min: 1) + } + + private enum CodingKeys: CodingKey {} + } + + public struct DeletePushNotificationRegistrationResponse: AWSDecodableShape { + public init() {} + } + public struct DeleteQueueRequest: AWSEncodableShape { /// The identifier of the Amazon Connect instance. You can find the instance ID in the Amazon Resource Name (ARN) of the instance. public let instanceId: String @@ -7775,6 +8077,53 @@ extension Connect { } } + public struct DescribeHoursOfOperationOverrideRequest: AWSEncodableShape { + /// The identifier for the hours of operation. + public let hoursOfOperationId: String + /// The identifier for the hours of operation override. + public let hoursOfOperationOverrideId: String + /// The identifier of the Amazon Connect instance. + public let instanceId: String + + @inlinable + public init(hoursOfOperationId: String, hoursOfOperationOverrideId: String, instanceId: String) { + self.hoursOfOperationId = hoursOfOperationId + self.hoursOfOperationOverrideId = hoursOfOperationOverrideId + self.instanceId = instanceId + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! 
RequestEncodingContainer + _ = encoder.container(keyedBy: CodingKeys.self) + request.encodePath(self.hoursOfOperationId, key: "HoursOfOperationId") + request.encodePath(self.hoursOfOperationOverrideId, key: "HoursOfOperationOverrideId") + request.encodePath(self.instanceId, key: "InstanceId") + } + + public func validate(name: String) throws { + try self.validate(self.hoursOfOperationOverrideId, name: "hoursOfOperationOverrideId", parent: name, max: 36) + try self.validate(self.hoursOfOperationOverrideId, name: "hoursOfOperationOverrideId", parent: name, min: 1) + try self.validate(self.instanceId, name: "instanceId", parent: name, max: 100) + try self.validate(self.instanceId, name: "instanceId", parent: name, min: 1) + } + + private enum CodingKeys: CodingKey {} + } + + public struct DescribeHoursOfOperationOverrideResponse: AWSDecodableShape { + /// Information about the hours of operations override. + public let hoursOfOperationOverride: HoursOfOperationOverride? + + @inlinable + public init(hoursOfOperationOverride: HoursOfOperationOverride? = nil) { + self.hoursOfOperationOverride = hoursOfOperationOverride + } + + private enum CodingKeys: String, CodingKey { + case hoursOfOperationOverride = "HoursOfOperationOverride" + } + } + public struct DescribeHoursOfOperationRequest: AWSEncodableShape { /// The identifier for the hours of operation. public let hoursOfOperationId: String @@ -9099,6 +9448,24 @@ extension Connect { } } + public struct EffectiveHoursOfOperations: AWSDecodableShape { + /// The date that the hours of operation or overrides applies to. + public let date: String? + /// Information about the hours of operations with the effective override applied. + public let operationalHours: [OperationalHour]? + + @inlinable + public init(date: String? = nil, operationalHours: [OperationalHour]? 
= nil) { + self.date = date + self.operationalHours = operationalHours + } + + private enum CodingKeys: String, CodingKey { + case date = "Date" + case operationalHours = "OperationalHours" + } + } + public struct EmailAddressInfo: AWSEncodableShape { /// The display name of email address. public let displayName: String? @@ -10531,25 +10898,80 @@ extension Connect { } } - public struct GetCurrentUserDataResponse: AWSDecodableShape { - /// The total count of the result, regardless of the current page size. - public let approximateTotalCount: Int64? - /// If there are additional results, this is the token for the next set of results. - public let nextToken: String? - /// A list of the user data that is returned. - public let userDataList: [UserData]? + public struct GetCurrentUserDataResponse: AWSDecodableShape { + /// The total count of the result, regardless of the current page size. + public let approximateTotalCount: Int64? + /// If there are additional results, this is the token for the next set of results. + public let nextToken: String? + /// A list of the user data that is returned. + public let userDataList: [UserData]? + + @inlinable + public init(approximateTotalCount: Int64? = nil, nextToken: String? = nil, userDataList: [UserData]? = nil) { + self.approximateTotalCount = approximateTotalCount + self.nextToken = nextToken + self.userDataList = userDataList + } + + private enum CodingKeys: String, CodingKey { + case approximateTotalCount = "ApproximateTotalCount" + case nextToken = "NextToken" + case userDataList = "UserDataList" + } + } + + public struct GetEffectiveHoursOfOperationsRequest: AWSEncodableShape { + /// The Date from when the hours of operation are listed. + public let fromDate: String + /// The identifier for the hours of operation. + public let hoursOfOperationId: String + /// The identifier of the Amazon Connect instance. + public let instanceId: String + /// The Date until when the hours of operation are listed. 
+ public let toDate: String + + @inlinable + public init(fromDate: String, hoursOfOperationId: String, instanceId: String, toDate: String) { + self.fromDate = fromDate + self.hoursOfOperationId = hoursOfOperationId + self.instanceId = instanceId + self.toDate = toDate + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer + _ = encoder.container(keyedBy: CodingKeys.self) + request.encodeQuery(self.fromDate, key: "fromDate") + request.encodePath(self.hoursOfOperationId, key: "HoursOfOperationId") + request.encodePath(self.instanceId, key: "InstanceId") + request.encodeQuery(self.toDate, key: "toDate") + } + + public func validate(name: String) throws { + try self.validate(self.fromDate, name: "fromDate", parent: name, pattern: "^\\d{4}-\\d{2}-\\d{2}$") + try self.validate(self.instanceId, name: "instanceId", parent: name, max: 100) + try self.validate(self.instanceId, name: "instanceId", parent: name, min: 1) + try self.validate(self.toDate, name: "toDate", parent: name, pattern: "^\\d{4}-\\d{2}-\\d{2}$") + } + + private enum CodingKeys: CodingKey {} + } + + public struct GetEffectiveHoursOfOperationsResponse: AWSDecodableShape { + /// Information about the effective hours of operations + public let effectiveHoursOfOperationList: [EffectiveHoursOfOperations]? + /// The time zone for the hours of operation. + public let timeZone: String? @inlinable - public init(approximateTotalCount: Int64? = nil, nextToken: String? = nil, userDataList: [UserData]? = nil) { - self.approximateTotalCount = approximateTotalCount - self.nextToken = nextToken - self.userDataList = userDataList + public init(effectiveHoursOfOperationList: [EffectiveHoursOfOperations]? = nil, timeZone: String? 
= nil) { + self.effectiveHoursOfOperationList = effectiveHoursOfOperationList + self.timeZone = timeZone } private enum CodingKeys: String, CodingKey { - case approximateTotalCount = "ApproximateTotalCount" - case nextToken = "NextToken" - case userDataList = "UserDataList" + case effectiveHoursOfOperationList = "EffectiveHoursOfOperationList" + case timeZone = "TimeZone" } } @@ -11463,6 +11885,110 @@ extension Connect { } } + public struct HoursOfOperationOverride: AWSDecodableShape { + /// Configuration information for the hours of operation override: day, start time, and end time. + public let config: [HoursOfOperationOverrideConfig]? + /// The description of the hours of operation override. + public let description: String? + /// The date from which the hours of operation override would be effective. + public let effectiveFrom: String? + /// The date till which the hours of operation override would be effective. + public let effectiveTill: String? + /// The Amazon Resource Name (ARN) for the hours of operation. + public let hoursOfOperationArn: String? + /// The identifier for the hours of operation. + public let hoursOfOperationId: String? + /// The identifier for the hours of operation override. + public let hoursOfOperationOverrideId: String? + /// The name of the hours of operation override. + public let name: String? + + @inlinable + public init(config: [HoursOfOperationOverrideConfig]? = nil, description: String? = nil, effectiveFrom: String? = nil, effectiveTill: String? = nil, hoursOfOperationArn: String? = nil, hoursOfOperationId: String? = nil, hoursOfOperationOverrideId: String? = nil, name: String? 
= nil) { + self.config = config + self.description = description + self.effectiveFrom = effectiveFrom + self.effectiveTill = effectiveTill + self.hoursOfOperationArn = hoursOfOperationArn + self.hoursOfOperationId = hoursOfOperationId + self.hoursOfOperationOverrideId = hoursOfOperationOverrideId + self.name = name + } + + private enum CodingKeys: String, CodingKey { + case config = "Config" + case description = "Description" + case effectiveFrom = "EffectiveFrom" + case effectiveTill = "EffectiveTill" + case hoursOfOperationArn = "HoursOfOperationArn" + case hoursOfOperationId = "HoursOfOperationId" + case hoursOfOperationOverrideId = "HoursOfOperationOverrideId" + case name = "Name" + } + } + + public struct HoursOfOperationOverrideConfig: AWSEncodableShape & AWSDecodableShape { + /// The day that the hours of operation override applies to. + public let day: OverrideDays? + /// The end time that your contact center closes if overrides are applied. + public let endTime: OverrideTimeSlice? + /// The start time when your contact center opens if overrides are applied. + public let startTime: OverrideTimeSlice? + + @inlinable + public init(day: OverrideDays? = nil, endTime: OverrideTimeSlice? = nil, startTime: OverrideTimeSlice? = nil) { + self.day = day + self.endTime = endTime + self.startTime = startTime + } + + public func validate(name: String) throws { + try self.endTime?.validate(name: "\(name).endTime") + try self.startTime?.validate(name: "\(name).startTime") + } + + private enum CodingKeys: String, CodingKey { + case day = "Day" + case endTime = "EndTime" + case startTime = "StartTime" + } + } + + public struct HoursOfOperationOverrideSearchCriteria: AWSEncodableShape { + /// A list of conditions which would be applied together with an AND condition. + public let andConditions: [HoursOfOperationOverrideSearchCriteria]? + /// A leaf node condition which can be used to specify a date condition. + public let dateCondition: DateCondition? 
+ /// A list of conditions which would be applied together with an OR condition. + public let orConditions: [HoursOfOperationOverrideSearchCriteria]? + public let stringCondition: StringCondition? + + @inlinable + public init(andConditions: [HoursOfOperationOverrideSearchCriteria]? = nil, dateCondition: DateCondition? = nil, orConditions: [HoursOfOperationOverrideSearchCriteria]? = nil, stringCondition: StringCondition? = nil) { + self.andConditions = andConditions + self.dateCondition = dateCondition + self.orConditions = orConditions + self.stringCondition = stringCondition + } + + public func validate(name: String) throws { + try self.andConditions?.forEach { + try $0.validate(name: "\(name).andConditions[]") + } + try self.dateCondition?.validate(name: "\(name).dateCondition") + try self.orConditions?.forEach { + try $0.validate(name: "\(name).orConditions[]") + } + } + + private enum CodingKeys: String, CodingKey { + case andConditions = "AndConditions" + case dateCondition = "DateCondition" + case orConditions = "OrConditions" + case stringCondition = "StringCondition" + } + } + public struct HoursOfOperationSearchCriteria: AWSEncodableShape { /// A list of conditions which would be applied together with an AND condition. public let andConditions: [HoursOfOperationSearchCriteria]? @@ -12925,6 +13451,69 @@ extension Connect { } } + public struct ListHoursOfOperationOverridesRequest: AWSEncodableShape { + /// The identifier for the hours of operation + public let hoursOfOperationId: String + /// The identifier of the Amazon Connect instance. + public let instanceId: String + /// The maximum number of results to return per page. The default MaxResult size is 100. Valid Range: Minimum value of 1. Maximum value of 1000. + public let maxResults: Int? + /// The token for the next set of results. Use the value returned in the previous response in the next request to retrieve the next set of results. + public let nextToken: String? 
+ + @inlinable + public init(hoursOfOperationId: String, instanceId: String, maxResults: Int? = nil, nextToken: String? = nil) { + self.hoursOfOperationId = hoursOfOperationId + self.instanceId = instanceId + self.maxResults = maxResults + self.nextToken = nextToken + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer + _ = encoder.container(keyedBy: CodingKeys.self) + request.encodePath(self.hoursOfOperationId, key: "HoursOfOperationId") + request.encodePath(self.instanceId, key: "InstanceId") + request.encodeQuery(self.maxResults, key: "maxResults") + request.encodeQuery(self.nextToken, key: "nextToken") + } + + public func validate(name: String) throws { + try self.validate(self.instanceId, name: "instanceId", parent: name, max: 100) + try self.validate(self.instanceId, name: "instanceId", parent: name, min: 1) + try self.validate(self.maxResults, name: "maxResults", parent: name, max: 100) + try self.validate(self.maxResults, name: "maxResults", parent: name, min: 1) + } + + private enum CodingKeys: CodingKey {} + } + + public struct ListHoursOfOperationOverridesResponse: AWSDecodableShape { + /// Information about the hours of operation override. + public let hoursOfOperationOverrideList: [HoursOfOperationOverride]? + /// The AWS Region where this resource was last modified. + public let lastModifiedRegion: String? + /// The timestamp when this resource was last modified. + public let lastModifiedTime: Date? + /// The token for the next set of results. Use the value returned in the previous response in the next request to retrieve the next set of results. + public let nextToken: String? + + @inlinable + public init(hoursOfOperationOverrideList: [HoursOfOperationOverride]? = nil, lastModifiedRegion: String? = nil, lastModifiedTime: Date? = nil, nextToken: String? 
= nil) { + self.hoursOfOperationOverrideList = hoursOfOperationOverrideList + self.lastModifiedRegion = lastModifiedRegion + self.lastModifiedTime = lastModifiedTime + self.nextToken = nextToken + } + + private enum CodingKeys: String, CodingKey { + case hoursOfOperationOverrideList = "HoursOfOperationOverrideList" + case lastModifiedRegion = "LastModifiedRegion" + case lastModifiedTime = "LastModifiedTime" + case nextToken = "NextToken" + } + } + public struct ListHoursOfOperationsRequest: AWSEncodableShape { /// The identifier of the Amazon Connect instance. You can find the instance ID in the Amazon Resource Name (ARN) of the instance. public let instanceId: String @@ -14926,7 +15515,7 @@ extension Connect { public struct MetricFilterV2: AWSEncodableShape & AWSDecodableShape { /// The key to use for filtering data. Valid metric filter keys: ANSWERING_MACHINE_DETECTION_STATUS CASE_STATUS DISCONNECT_REASON FLOWS_ACTION_IDENTIFIER FLOWS_NEXT_ACTION_IDENTIFIER FLOWS_OUTCOME_TYPE FLOWS_RESOURCE_TYPE INITIATION_METHOD public let metricFilterKey: String? - /// The values to use for filtering data. Values for metric-level filters can be either a fixed set of values or a customized list, depending on the use case. For valid values of metric-level filters INITIATION_METHOD, DISCONNECT_REASON, and ANSWERING_MACHINE_DETECTION_STATUS, see ContactTraceRecord in the Amazon Connect Administrator Guide. For valid values of the metric-level filter FLOWS_OUTCOME_TYPE, see the description for the Flow outcome metric in the Amazon Connect Administrator Guide. For valid values of the metric-level filter BOT_CONVERSATION_OUTCOME_TYPE, see the description for the Bot conversations completed in the Amazon Connect Administrator Guide. For valid values of the metric-level filter BOT_INTENT_OUTCOME_TYPE, see the description for the Bot intents completed metric in the Amazon Connect Administrator Guide. + /// The values to use for filtering data. 
Values for metric-level filters can be either a fixed set of values or a customized list, depending on the use case. For valid values of metric-level filters INITIATION_METHOD, DISCONNECT_REASON, and ANSWERING_MACHINE_DETECTION_STATUS, see ContactTraceRecord in the Amazon Connect Administrator Guide. For valid values of the metric-level filter FLOWS_OUTCOME_TYPE, see the description for the Flow outcome metric in the Amazon Connect Administrator Guide. For valid values of the metric-level filter BOT_CONVERSATION_OUTCOME_TYPE, see the description for the Bot conversations completed in the Amazon Connect Administrator Guide. For valid values of the metric-level filter BOT_INTENT_OUTCOME_TYPE, see the description for the Bot intents completed metric in the Amazon Connect Administrator Guide. public let metricFilterValues: [String]? /// If set to true, the API response contains results that filter out the results matched by the metric-level filters condition. By default, Negate is set to false. public let negate: Bool? @@ -15200,6 +15789,24 @@ extension Connect { } } + public struct OperationalHour: AWSDecodableShape { + /// The end time that your contact center closes. + public let end: OverrideTimeSlice? + /// The start time that your contact center opens. + public let start: OverrideTimeSlice? + + @inlinable + public init(end: OverrideTimeSlice? = nil, start: OverrideTimeSlice? = nil) { + self.end = end + self.start = start + } + + private enum CodingKeys: String, CodingKey { + case end = "End" + case start = "Start" + } + } + public struct OutboundAdditionalRecipients: AWSEncodableShape { /// The additional CC email address recipients information. public let ccEmailAddresses: [EmailAddressInfo]? @@ -15327,6 +15934,31 @@ extension Connect { } } + public struct OverrideTimeSlice: AWSEncodableShape & AWSDecodableShape { + /// The hours. + public let hours: Int + /// The minutes. 
+ public let minutes: Int + + @inlinable + public init(hours: Int, minutes: Int) { + self.hours = hours + self.minutes = minutes + } + + public func validate(name: String) throws { + try self.validate(self.hours, name: "hours", parent: name, max: 23) + try self.validate(self.hours, name: "hours", parent: name, min: 0) + try self.validate(self.minutes, name: "minutes", parent: name, max: 59) + try self.validate(self.minutes, name: "minutes", parent: name, min: 0) + } + + private enum CodingKeys: String, CodingKey { + case hours = "Hours" + case minutes = "Minutes" + } + } + public struct ParticipantCapabilities: AWSEncodableShape & AWSDecodableShape { /// The screen sharing capability that is enabled for the participant. SEND indicates the participant can share their screen. public let screenShare: ScreenShareCapability? @@ -17701,6 +18333,67 @@ extension Connect { } } + public struct SearchHoursOfOperationOverridesRequest: AWSEncodableShape { + /// The identifier of the Amazon Connect instance. + public let instanceId: String + /// The maximum number of results to return per page. Valid Range: Minimum value of 1. Maximum value of 100. + public let maxResults: Int? + /// The token for the next set of results. Use the value returned in the previous response in the next request to retrieve the next set of results. Length Constraints: Minimum length of 1. Maximum length of 2500. + public let nextToken: String? + /// The search criteria to be used to return hours of operations overrides. + public let searchCriteria: HoursOfOperationOverrideSearchCriteria? + public let searchFilter: HoursOfOperationSearchFilter? + + @inlinable + public init(instanceId: String, maxResults: Int? = nil, nextToken: String? = nil, searchCriteria: HoursOfOperationOverrideSearchCriteria? = nil, searchFilter: HoursOfOperationSearchFilter? 
= nil) { + self.instanceId = instanceId + self.maxResults = maxResults + self.nextToken = nextToken + self.searchCriteria = searchCriteria + self.searchFilter = searchFilter + } + + public func validate(name: String) throws { + try self.validate(self.instanceId, name: "instanceId", parent: name, max: 100) + try self.validate(self.instanceId, name: "instanceId", parent: name, min: 1) + try self.validate(self.maxResults, name: "maxResults", parent: name, max: 100) + try self.validate(self.maxResults, name: "maxResults", parent: name, min: 1) + try self.validate(self.nextToken, name: "nextToken", parent: name, max: 2500) + try self.validate(self.nextToken, name: "nextToken", parent: name, min: 1) + try self.searchCriteria?.validate(name: "\(name).searchCriteria") + } + + private enum CodingKeys: String, CodingKey { + case instanceId = "InstanceId" + case maxResults = "MaxResults" + case nextToken = "NextToken" + case searchCriteria = "SearchCriteria" + case searchFilter = "SearchFilter" + } + } + + public struct SearchHoursOfOperationOverridesResponse: AWSDecodableShape { + /// The total number of hours of operations which matched your search query. + public let approximateTotalCount: Int64? + /// Information about the hours of operations overrides. + public let hoursOfOperationOverrides: [HoursOfOperationOverride]? + /// The token for the next set of results. Use the value returned in the previous response in the next request to retrieve the next set of results. Length Constraints: Minimum length of 1. Maximum length of 2500. + public let nextToken: String? + + @inlinable + public init(approximateTotalCount: Int64? = nil, hoursOfOperationOverrides: [HoursOfOperationOverride]? = nil, nextToken: String? 
= nil) { + self.approximateTotalCount = approximateTotalCount + self.hoursOfOperationOverrides = hoursOfOperationOverrides + self.nextToken = nextToken + } + + private enum CodingKeys: String, CodingKey { + case approximateTotalCount = "ApproximateTotalCount" + case hoursOfOperationOverrides = "HoursOfOperationOverrides" + case nextToken = "NextToken" + } + } + public struct SearchHoursOfOperationsRequest: AWSEncodableShape { /// The identifier of the Amazon Connect instance. You can find the instance ID in the Amazon Resource Name (ARN) of the instance. public let instanceId: String @@ -21871,6 +22564,73 @@ extension Connect { } } + public struct UpdateHoursOfOperationOverrideRequest: AWSEncodableShape { + /// Configuration information for the hours of operation override: day, start time, and end time. + public let config: [HoursOfOperationOverrideConfig]? + /// The description of the hours of operation override. + public let description: String? + /// The date from when the hours of operation override would be effective. + public let effectiveFrom: String? + /// The date till when the hours of operation override would be effective. + public let effectiveTill: String? + /// The identifier for the hours of operation. + public let hoursOfOperationId: String + /// The identifier for the hours of operation override. + public let hoursOfOperationOverrideId: String + /// The identifier of the Amazon Connect instance. + public let instanceId: String + /// The name of the hours of operation override. + public let name: String? + + @inlinable + public init(config: [HoursOfOperationOverrideConfig]? = nil, description: String? = nil, effectiveFrom: String? = nil, effectiveTill: String? = nil, hoursOfOperationId: String, hoursOfOperationOverrideId: String, instanceId: String, name: String? 
= nil) { + self.config = config + self.description = description + self.effectiveFrom = effectiveFrom + self.effectiveTill = effectiveTill + self.hoursOfOperationId = hoursOfOperationId + self.hoursOfOperationOverrideId = hoursOfOperationOverrideId + self.instanceId = instanceId + self.name = name + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer + var container = encoder.container(keyedBy: CodingKeys.self) + try container.encodeIfPresent(self.config, forKey: .config) + try container.encodeIfPresent(self.description, forKey: .description) + try container.encodeIfPresent(self.effectiveFrom, forKey: .effectiveFrom) + try container.encodeIfPresent(self.effectiveTill, forKey: .effectiveTill) + request.encodePath(self.hoursOfOperationId, key: "HoursOfOperationId") + request.encodePath(self.hoursOfOperationOverrideId, key: "HoursOfOperationOverrideId") + request.encodePath(self.instanceId, key: "InstanceId") + try container.encodeIfPresent(self.name, forKey: .name) + } + + public func validate(name: String) throws { + try self.config?.forEach { + try $0.validate(name: "\(name).config[]") + } + try self.validate(self.config, name: "config", parent: name, max: 100) + try self.validate(self.description, name: "description", parent: name, pattern: "^[\\P{C}\\r\\n\\t]{1,250}$") + try self.validate(self.effectiveFrom, name: "effectiveFrom", parent: name, pattern: "^\\d{4}-\\d{2}-\\d{2}$") + try self.validate(self.effectiveTill, name: "effectiveTill", parent: name, pattern: "^\\d{4}-\\d{2}-\\d{2}$") + try self.validate(self.hoursOfOperationOverrideId, name: "hoursOfOperationOverrideId", parent: name, max: 36) + try self.validate(self.hoursOfOperationOverrideId, name: "hoursOfOperationOverrideId", parent: name, min: 1) + try self.validate(self.instanceId, name: "instanceId", parent: name, max: 100) + try self.validate(self.instanceId, name: "instanceId", parent: name, min: 1) + try 
self.validate(self.name, name: "name", parent: name, pattern: "^[\\P{C}\\r\\n\\t]{1,127}$") + } + + private enum CodingKeys: String, CodingKey { + case config = "Config" + case description = "Description" + case effectiveFrom = "EffectiveFrom" + case effectiveTill = "EffectiveTill" + case name = "Name" + } + } + public struct UpdateHoursOfOperationRequest: AWSEncodableShape { /// Configuration information of the hours of operation. public let config: [HoursOfOperationConfig]? @@ -23668,9 +24428,9 @@ extension Connect { public struct UserIdentityInfo: AWSEncodableShape & AWSDecodableShape { /// The email address. If you are using SAML for identity management and include this parameter, an error is returned. public let email: String? - /// The first name. This is required if you are using Amazon Connect or SAML for identity management. + /// The first name. This is required if you are using Amazon Connect or SAML for identity management. Inputs must be in Unicode Normalization Form C (NFC). Text containing characters in a non-NFC form (for example, decomposed characters or combining marks) are not accepted. public let firstName: String? - /// The last name. This is required if you are using Amazon Connect or SAML for identity management. + /// The last name. This is required if you are using Amazon Connect or SAML for identity management. Inputs must be in Unicode Normalization Form C (NFC). Text containing characters in a non-NFC form (for example, decomposed characters or combining marks) are not accepted. public let lastName: String? /// The user's mobile number. public let mobile: String? @@ -24433,7 +25193,7 @@ public struct ConnectErrorType: AWSErrorType { /// You do not have sufficient permissions to perform this action. public static var accessDeniedException: Self { .init(.accessDeniedException) } - /// A conditional check failed. + /// Request processing failed because dependent condition failed. 
public static var conditionalOperationFailedException: Self { .init(.conditionalOperationFailedException) } /// Operation cannot be performed at this time as there is a conflict with another operation or contact state. public static var conflictException: Self { .init(.conflictException) } diff --git a/Sources/Soto/Services/DLM/DLM_api.swift b/Sources/Soto/Services/DLM/DLM_api.swift index d896d538d3..73f7e21c51 100644 --- a/Sources/Soto/Services/DLM/DLM_api.swift +++ b/Sources/Soto/Services/DLM/DLM_api.swift @@ -115,6 +115,14 @@ public struct DLM: AWSService { "us-west-1": "dlm.us-west-1.api.aws", "us-west-2": "dlm.us-west-2.api.aws" ]), + [.dualstack, .fips]: .init(endpoints: [ + "ca-central-1": "dlm-fips.ca-central-1.api.aws", + "ca-west-1": "dlm-fips.ca-west-1.api.aws", + "us-east-1": "dlm-fips.us-east-1.api.aws", + "us-east-2": "dlm-fips.us-east-2.api.aws", + "us-west-1": "dlm-fips.us-west-1.api.aws", + "us-west-2": "dlm-fips.us-west-2.api.aws" + ]), [.fips]: .init(endpoints: [ "us-gov-east-1": "dlm.us-gov-east-1.amazonaws.com", "us-gov-west-1": "dlm.us-gov-west-1.amazonaws.com" diff --git a/Sources/Soto/Services/DLM/DLM_shapes.swift b/Sources/Soto/Services/DLM/DLM_shapes.swift index c0f8483c64..e3e6dafefe 100644 --- a/Sources/Soto/Services/DLM/DLM_shapes.swift +++ b/Sources/Soto/Services/DLM/DLM_shapes.swift @@ -68,6 +68,7 @@ extension DLM { public enum LocationValues: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case cloud = "CLOUD" + case localZone = "LOCAL_ZONE" case outpostLocal = "OUTPOST_LOCAL" public var description: String { return self.rawValue } } @@ -87,6 +88,7 @@ extension DLM { public enum ResourceLocationValues: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case cloud = "CLOUD" + case localZone = "LOCAL_ZONE" case outpost = "OUTPOST" public var description: String { return self.rawValue } } @@ -307,20 +309,21 @@ extension DLM { public struct CreateRule: AWSEncodableShape & 
AWSDecodableShape { /// The schedule, as a Cron expression. The schedule interval must be between 1 hour and 1 - /// year. For more information, see Cron - /// expressions in the Amazon CloudWatch User Guide. + /// year. For more information, see the Cron expressions reference in + /// the Amazon EventBridge User Guide. public let cronExpression: String? /// The interval between snapshots. The supported values are 1, 2, 3, 4, 6, 8, 12, and 24. public let interval: Int? /// The interval unit. public let intervalUnit: IntervalUnitValues? - /// [Custom snapshot policies only] Specifies the destination for snapshots created by the policy. To create - /// snapshots in the same Region as the source resource, specify CLOUD. To create - /// snapshots on the same Outpost as the source resource, specify OUTPOST_LOCAL. - /// If you omit this parameter, CLOUD is used by default. If the policy targets resources in an Amazon Web Services Region, then you must create - /// snapshots in the same Region as the source resource. If the policy targets resources on an - /// Outpost, then you can create snapshots on the same Outpost as the source resource, or in - /// the Region of that Outpost. + /// [Custom snapshot policies only] Specifies the destination for snapshots created by the policy. The + /// allowed destinations depend on the location of the targeted resources. If the policy targets resources in a Region, then you must create snapshots + /// in the same Region as the source resource. If the policy targets resources in a Local Zone, you can create snapshots in + /// the same Local Zone or in its parent Region. If the policy targets resources on an Outpost, then you can create snapshots + /// on the same Outpost or in its parent Region. Specify one of the following values: To create snapshots in the same Region as the source resource, specify + /// CLOUD. To create snapshots in the same Local Zone as the source resource, specify + /// LOCAL_ZONE. 
To create snapshots on the same Outpost as the source resource, specify + /// OUTPOST_LOCAL. Default: CLOUD public let location: LocationValues? /// [Custom snapshot policies that target instances only] Specifies pre and/or post scripts for a snapshot lifecycle policy /// that targets instances. This is useful for creating application-consistent snapshots, or for @@ -861,7 +864,8 @@ extension DLM { /// The local date and time when the lifecycle policy was last modified. @OptionalCustomCoding public var dateModified: Date? - /// [Default policies only] The type of default policy. Values include: VOLUME - Default policy for EBS snapshots INSTANCE - Default policy for EBS-backed AMIs + /// Indicates whether the policy is a default lifecycle policy or a custom + /// lifecycle policy. true - the policy is a default policy. false - the policy is a custom policy. public let defaultPolicy: Bool? /// The description of the lifecycle policy. public let description: String? @@ -1063,15 +1067,18 @@ extension DLM { public let parameters: Parameters? /// The type of policy to create. Specify one of the following: SIMPLIFIED To create a default policy. STANDARD To create a custom policy. public let policyLanguage: PolicyLanguageValues? - /// [Custom policies only] The valid target resource types and actions a policy can manage. Specify EBS_SNAPSHOT_MANAGEMENT + /// The type of policy. Specify EBS_SNAPSHOT_MANAGEMENT /// to create a lifecycle policy that manages the lifecycle of Amazon EBS snapshots. Specify IMAGE_MANAGEMENT /// to create a lifecycle policy that manages the lifecycle of EBS-backed AMIs. Specify EVENT_BASED_POLICY /// to create an event-based policy that performs specific actions when a defined event occurs in your Amazon Web Services account. The default is EBS_SNAPSHOT_MANAGEMENT. public let policyType: PolicyTypeValues? - /// [Custom snapshot and AMI policies only] The location of the resources to backup. 
If the source resources are located in an - /// Amazon Web Services Region, specify CLOUD. If the source resources are located on an Outpost - /// in your account, specify OUTPOST. If you specify OUTPOST, Amazon Data Lifecycle Manager backs up all resources - /// of the specified type with matching target tags across all of the Outposts in your account. + /// [Custom snapshot and AMI policies only] The location of the resources to backup. If the source resources are located in a Region, specify CLOUD. In this case, + /// the policy targets all resources of the specified type with matching target tags across all + /// Availability Zones in the Region. [Custom snapshot policies only] If the source resources are located in a Local Zone, specify LOCAL_ZONE. + /// In this case, the policy targets all resources of the specified type with matching target + /// tags across all Local Zones in the Region. If the source resources are located on an Outpost in your account, specify OUTPOST. + /// In this case, the policy targets all resources of the specified type with matching target + /// tags across all of the Outposts in your account. public let resourceLocations: [ResourceLocationValues]? /// [Default policies only] Specify the type of default policy to create. To create a default policy for EBS snapshots, that creates snapshots of all volumes in the /// Region that do not have recent backups, specify VOLUME. To create a default policy for EBS-backed AMIs, that creates EBS-backed @@ -1245,9 +1252,9 @@ extension DLM { public let copyTags: Bool? /// The creation rule. public let createRule: CreateRule? - /// Specifies a rule for copying snapshots or AMIs across regions. You can't specify cross-Region copy rules for policies that create snapshots on an Outpost. - /// If the policy creates snapshots in a Region, then snapshots can be copied to up to three - /// Regions or Outposts. + /// Specifies a rule for copying snapshots or AMIs across Regions. 
You can't specify cross-Region copy rules for policies that create snapshots on an + /// Outpost or in a Local Zone. If the policy creates snapshots in a Region, then snapshots + /// can be copied to up to three Regions or Outposts. public let crossRegionCopyRules: [CrossRegionCopyRule]? /// [Custom AMI policies only] The AMI deprecation rule for the schedule. public let deprecateRule: DeprecateRule? diff --git a/Sources/Soto/Services/DatabaseMigrationService/DatabaseMigrationService_api.swift b/Sources/Soto/Services/DatabaseMigrationService/DatabaseMigrationService_api.swift index 25f8816f36..c24f6b1121 100644 --- a/Sources/Soto/Services/DatabaseMigrationService/DatabaseMigrationService_api.swift +++ b/Sources/Soto/Services/DatabaseMigrationService/DatabaseMigrationService_api.swift @@ -711,6 +711,7 @@ public struct DatabaseMigrationService: AWSService { /// - availabilityZone: The Availability Zone where the replication instance will be created. The default value is a random, system-chosen Availability Zone in the endpoint's Amazon Web Services Region, for example: us-east-1d. /// - dnsNameServers: A list of custom DNS name servers supported for the replication instance to access your on-premise source or target database. This list overrides the default name servers supported by the replication instance. You can specify a comma-separated list of internet addresses for up to four on-premise DNS name servers. For example: "1.1.1.1,2.2.2.2,3.3.3.3,4.4.4.4" /// - engineVersion: The engine version number of the replication instance. If an engine version number is not specified when a replication instance is created, the default is the latest engine version available. + /// - kerberosAuthenticationSettings: Specifies the ID of the secret that stores the key cache file required for kerberos authentication, when creating a replication instance. /// - kmsKeyId: An KMS key identifier that is used to encrypt the data on the replication instance. 
If you don't specify a value for the KmsKeyId parameter, then DMS uses your default encryption key. KMS creates the default encryption key for your Amazon Web Services account. Your Amazon Web Services account has a different default encryption key for each Amazon Web Services Region. /// - multiAZ: Specifies whether the replication instance is a Multi-AZ deployment. You can't set the AvailabilityZone parameter if the Multi-AZ parameter is set to true. /// - networkType: The type of IP address protocol used by a replication instance, such as IPv4 only or Dual-stack that supports both IPv4 and IPv6 addressing. IPv6 only is not yet supported. @@ -730,6 +731,7 @@ public struct DatabaseMigrationService: AWSService { availabilityZone: String? = nil, dnsNameServers: String? = nil, engineVersion: String? = nil, + kerberosAuthenticationSettings: KerberosAuthenticationSettings? = nil, kmsKeyId: String? = nil, multiAZ: Bool? = nil, networkType: String? = nil, @@ -749,6 +751,7 @@ public struct DatabaseMigrationService: AWSService { availabilityZone: availabilityZone, dnsNameServers: dnsNameServers, engineVersion: engineVersion, + kerberosAuthenticationSettings: kerberosAuthenticationSettings, kmsKeyId: kmsKeyId, multiAZ: multiAZ, networkType: networkType, @@ -1534,7 +1537,7 @@ public struct DatabaseMigrationService: AWSService { /// Returns a paginated list of data providers for your account in the current region. /// /// Parameters: - /// - filters: Filters applied to the data providers described in the form of key-value pairs. Valid filter names: data-provider-identifier + /// - filters: Filters applied to the data providers described in the form of key-value pairs. Valid filter names and values: data-provider-identifier, data provider arn or name /// - marker: Specifies the unique pagination token that makes it possible to display the next page of results. 
If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords. If Marker is returned by a previous response, there are more results available. The value of Marker is a unique pagination token for each page. To retrieve the next page, make the call again using the returned token and keeping all other arguments unchanged. /// - maxRecords: The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, DMS includes a pagination token in the response so that you can retrieve the remaining results. /// - logger: Logger use during operation @@ -2039,7 +2042,7 @@ public struct DatabaseMigrationService: AWSService { /// Returns a paginated list of instance profiles for your account in the current region. /// /// Parameters: - /// - filters: Filters applied to the instance profiles described in the form of key-value pairs. + /// - filters: Filters applied to the instance profiles described in the form of key-value pairs. Valid filter names and values: instance-profile-identifier, instance profile arn or name /// - marker: Specifies the unique pagination token that makes it possible to display the next page of results. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords. If Marker is returned by a previous response, there are more results available. The value of Marker is a unique pagination token for each page. To retrieve the next page, make the call again using the returned token and keeping all other arguments unchanged. /// - maxRecords: The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, DMS includes a pagination token in the response so that you can retrieve the remaining results. 
/// - logger: Logger use during operation @@ -2264,7 +2267,7 @@ public struct DatabaseMigrationService: AWSService { /// Returns a paginated list of migration projects for your account in the current region. /// /// Parameters: - /// - filters: Filters applied to the migration projects described in the form of key-value pairs. + /// - filters: Filters applied to the migration projects described in the form of key-value pairs. Valid filter names and values: instance-profile-identifier, instance profile arn or name data-provider-identifier, data provider arn or name migration-project-identifier, migration project arn or name /// - marker: Specifies the unique pagination token that makes it possible to display the next page of results. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords. If Marker is returned by a previous response, there are more results available. The value of Marker is a unique pagination token for each page. To retrieve the next page, make the call again using the returned token and keeping all other arguments unchanged. /// - maxRecords: The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, DMS includes a pagination token in the response so that you can retrieve the remaining results. /// - logger: Logger use during operation @@ -3461,6 +3464,7 @@ public struct DatabaseMigrationService: AWSService { /// - applyImmediately: Indicates whether the changes should be applied immediately or during the next maintenance window. /// - autoMinorVersionUpgrade: A value that indicates that minor version upgrades are applied automatically to the replication instance during the maintenance window. Changing this parameter doesn't result in an outage, except in the case described following. The change is asynchronously applied as soon as possible. 
An outage does result if these factors apply: This parameter is set to true during the maintenance window. A newer minor version is available. DMS has enabled automatic patching for the given engine version. /// - engineVersion: The engine version number of the replication instance. When modifying a major engine version of an instance, also set AllowMajorVersionUpgrade to true. + /// - kerberosAuthenticationSettings: Specifies the ID of the secret that stores the key cache file required for kerberos authentication, when modifying a replication instance. /// - multiAZ: Specifies whether the replication instance is a Multi-AZ deployment. You can't set the AvailabilityZone parameter if the Multi-AZ parameter is set to true. /// - networkType: The type of IP address protocol used by a replication instance, such as IPv4 only or Dual-stack that supports both IPv4 and IPv6 addressing. IPv6 only is not yet supported. /// - preferredMaintenanceWindow: The weekly time range (in UTC) during which system maintenance can occur, which might result in an outage. Changing this parameter does not result in an outage, except in the following situation, and the change is asynchronously applied as soon as possible. If moving this window to the current time, there must be at least 30 minutes between the current time and end of the window to ensure pending changes are applied. Default: Uses existing setting Format: ddd:hh24:mi-ddd:hh24:mi Valid Days: Mon | Tue | Wed | Thu | Fri | Sat | Sun Constraints: Must be at least 30 minutes @@ -3476,6 +3480,7 @@ public struct DatabaseMigrationService: AWSService { applyImmediately: Bool? = nil, autoMinorVersionUpgrade: Bool? = nil, engineVersion: String? = nil, + kerberosAuthenticationSettings: KerberosAuthenticationSettings? = nil, multiAZ: Bool? = nil, networkType: String? = nil, preferredMaintenanceWindow: String? 
= nil, @@ -3491,6 +3496,7 @@ public struct DatabaseMigrationService: AWSService { applyImmediately: applyImmediately, autoMinorVersionUpgrade: autoMinorVersionUpgrade, engineVersion: engineVersion, + kerberosAuthenticationSettings: kerberosAuthenticationSettings, multiAZ: multiAZ, networkType: networkType, preferredMaintenanceWindow: preferredMaintenanceWindow, @@ -4092,7 +4098,7 @@ public struct DatabaseMigrationService: AWSService { /// - cdcStartTime: Indicates the start time for a change data capture (CDC) operation. Use either CdcStartTime or CdcStartPosition to specify when you want a CDC operation to start. Specifying both values results in an error. /// - cdcStopPosition: Indicates when you want a change data capture (CDC) operation to stop. The value can be either server time or commit time. /// - replicationConfigArn: The Amazon Resource Name of the replication for which to start replication. - /// - startReplicationType: The replication type. + /// - startReplicationType: The replication type. When the replication type is full-load or full-load-and-cdc, the only valid value for the first run of the replication is start-replication. This option will start the replication. You can also use ReloadTables to reload specific tables that failed during replication instead of restarting the replication. The resume-processing option isn't applicable for a full-load replication, because you can't resume partially loaded tables during the full load phase. For a full-load-and-cdc replication, DMS migrates table data, and then applies data changes that occur on the source. To load all the tables again, and start capturing source changes, use reload-target. Otherwise use resume-processing, to replicate the changes from the last stop position. /// - logger: Logger use during operation @inlinable public func startReplication( @@ -4588,7 +4594,7 @@ extension DatabaseMigrationService { /// Return PaginatorSequence for operation ``describeDataProviders(_:logger:)``. 
/// /// - Parameters: - /// - filters: Filters applied to the data providers described in the form of key-value pairs. Valid filter names: data-provider-identifier + /// - filters: Filters applied to the data providers described in the form of key-value pairs. Valid filter names and values: data-provider-identifier, data provider arn or name /// - maxRecords: The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, DMS includes a pagination token in the response so that you can retrieve the remaining results. /// - logger: Logger used for logging @inlinable @@ -5087,7 +5093,7 @@ extension DatabaseMigrationService { /// Return PaginatorSequence for operation ``describeInstanceProfiles(_:logger:)``. /// /// - Parameters: - /// - filters: Filters applied to the instance profiles described in the form of key-value pairs. + /// - filters: Filters applied to the instance profiles described in the form of key-value pairs. Valid filter names and values: instance-profile-identifier, instance profile arn or name /// - maxRecords: The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, DMS includes a pagination token in the response so that you can retrieve the remaining results. /// - logger: Logger used for logging @inlinable @@ -5324,7 +5330,7 @@ extension DatabaseMigrationService { /// Return PaginatorSequence for operation ``describeMigrationProjects(_:logger:)``. /// /// - Parameters: - /// - filters: Filters applied to the migration projects described in the form of key-value pairs. + /// - filters: Filters applied to the migration projects described in the form of key-value pairs. 
Valid filter names and values: instance-profile-identifier, instance profile arn or name data-provider-identifier, data provider arn or name migration-project-identifier, migration project arn or name /// - maxRecords: The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, DMS includes a pagination token in the response so that you can retrieve the remaining results. /// - logger: Logger used for logging @inlinable diff --git a/Sources/Soto/Services/DatabaseMigrationService/DatabaseMigrationService_shapes.swift b/Sources/Soto/Services/DatabaseMigrationService/DatabaseMigrationService_shapes.swift index 154c35ddab..0f5de13202 100644 --- a/Sources/Soto/Services/DatabaseMigrationService/DatabaseMigrationService_shapes.swift +++ b/Sources/Soto/Services/DatabaseMigrationService/DatabaseMigrationService_shapes.swift @@ -180,6 +180,12 @@ extension DatabaseMigrationService { public var description: String { return self.rawValue } } + public enum OracleAuthenticationMethod: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case kerberos = "kerberos" + case password = "password" + public var description: String { return self.rawValue } + } + public enum OriginTypeValue: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case source = "SOURCE" case target = "TARGET" @@ -243,6 +249,12 @@ extension DatabaseMigrationService { public var description: String { return self.rawValue } } + public enum SqlServerAuthenticationMethod: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case kerberos = "kerberos" + case password = "password" + public var description: String { return self.rawValue } + } + public enum SslSecurityProtocolValue: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case plaintext = "plaintext" case sslEncryption = "ssl-encryption" @@ -1340,6 +1352,8 @@ extension DatabaseMigrationService { 
public let dnsNameServers: String? /// The engine version number of the replication instance. If an engine version number is not specified when a replication instance is created, the default is the latest engine version available. public let engineVersion: String? + /// Specifies the ID of the secret that stores the key cache file required for kerberos authentication, when creating a replication instance. + public let kerberosAuthenticationSettings: KerberosAuthenticationSettings? /// An KMS key identifier that is used to encrypt the data on the replication instance. If you don't specify a value for the KmsKeyId parameter, then DMS uses your default encryption key. KMS creates the default encryption key for your Amazon Web Services account. Your Amazon Web Services account has a different default encryption key for each Amazon Web Services Region. public let kmsKeyId: String? /// Specifies whether the replication instance is a Multi-AZ deployment. You can't set the AvailabilityZone parameter if the Multi-AZ parameter is set to true. @@ -1364,12 +1378,13 @@ extension DatabaseMigrationService { public let vpcSecurityGroupIds: [String]? @inlinable - public init(allocatedStorage: Int? = nil, autoMinorVersionUpgrade: Bool? = nil, availabilityZone: String? = nil, dnsNameServers: String? = nil, engineVersion: String? = nil, kmsKeyId: String? = nil, multiAZ: Bool? = nil, networkType: String? = nil, preferredMaintenanceWindow: String? = nil, publiclyAccessible: Bool? = nil, replicationInstanceClass: String, replicationInstanceIdentifier: String, replicationSubnetGroupIdentifier: String? = nil, resourceIdentifier: String? = nil, tags: [Tag]? = nil, vpcSecurityGroupIds: [String]? = nil) { + public init(allocatedStorage: Int? = nil, autoMinorVersionUpgrade: Bool? = nil, availabilityZone: String? = nil, dnsNameServers: String? = nil, engineVersion: String? = nil, kerberosAuthenticationSettings: KerberosAuthenticationSettings? = nil, kmsKeyId: String? = nil, multiAZ: Bool? 
= nil, networkType: String? = nil, preferredMaintenanceWindow: String? = nil, publiclyAccessible: Bool? = nil, replicationInstanceClass: String, replicationInstanceIdentifier: String, replicationSubnetGroupIdentifier: String? = nil, resourceIdentifier: String? = nil, tags: [Tag]? = nil, vpcSecurityGroupIds: [String]? = nil) { self.allocatedStorage = allocatedStorage self.autoMinorVersionUpgrade = autoMinorVersionUpgrade self.availabilityZone = availabilityZone self.dnsNameServers = dnsNameServers self.engineVersion = engineVersion + self.kerberosAuthenticationSettings = kerberosAuthenticationSettings self.kmsKeyId = kmsKeyId self.multiAZ = multiAZ self.networkType = networkType @@ -1383,12 +1398,17 @@ extension DatabaseMigrationService { self.vpcSecurityGroupIds = vpcSecurityGroupIds } + public func validate(name: String) throws { + try self.validate(self.replicationInstanceClass, name: "replicationInstanceClass", parent: name, max: 30) + } + private enum CodingKeys: String, CodingKey { case allocatedStorage = "AllocatedStorage" case autoMinorVersionUpgrade = "AutoMinorVersionUpgrade" case availabilityZone = "AvailabilityZone" case dnsNameServers = "DnsNameServers" case engineVersion = "EngineVersion" + case kerberosAuthenticationSettings = "KerberosAuthenticationSettings" case kmsKeyId = "KmsKeyId" case multiAZ = "MultiAZ" case networkType = "NetworkType" @@ -2522,7 +2542,7 @@ extension DatabaseMigrationService { } public struct DescribeDataProvidersMessage: AWSEncodableShape { - /// Filters applied to the data providers described in the form of key-value pairs. Valid filter names: data-provider-identifier + /// Filters applied to the data providers described in the form of key-value pairs. Valid filter names and values: data-provider-identifier, data provider arn or name public let filters: [Filter]? /// Specifies the unique pagination token that makes it possible to display the next page of results. 
If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords. If Marker is returned by a previous response, there are more results available. The value of Marker is a unique pagination token for each page. To retrieve the next page, make the call again using the returned token and keeping all other arguments unchanged. public let marker: String? @@ -3098,7 +3118,7 @@ extension DatabaseMigrationService { } public struct DescribeInstanceProfilesMessage: AWSEncodableShape { - /// Filters applied to the instance profiles described in the form of key-value pairs. + /// Filters applied to the instance profiles described in the form of key-value pairs. Valid filter names and values: instance-profile-identifier, instance profile arn or name public let filters: [Filter]? /// Specifies the unique pagination token that makes it possible to display the next page of results. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords. If Marker is returned by a previous response, there are more results available. The value of Marker is a unique pagination token for each page. To retrieve the next page, make the call again using the returned token and keeping all other arguments unchanged. public let marker: String? @@ -3358,7 +3378,7 @@ extension DatabaseMigrationService { } public struct DescribeMigrationProjectsMessage: AWSEncodableShape { - /// Filters applied to the migration projects described in the form of key-value pairs. + /// Filters applied to the migration projects described in the form of key-value pairs. Valid filter names and values: instance-profile-identifier, instance profile arn or name data-provider-identifier, data provider arn or name migration-project-identifier, migration project arn or name public let filters: [Filter]? /// Specifies the unique pagination token that makes it possible to display the next page of results. 
If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords. If Marker is returned by a previous response, there are more results available. The value of Marker is a unique pagination token for each page. To retrieve the next page, make the call again using the returned token and keeping all other arguments unchanged. public let marker: String? @@ -5013,9 +5033,11 @@ extension DatabaseMigrationService { public let sslEndpointIdentificationAlgorithm: KafkaSslEndpointIdentificationAlgorithm? /// The topic to which you migrate the data. If you don't specify a topic, DMS specifies "kafka-default-topic" as the migration topic. public let topic: String? + /// Specifies using the large integer value with Kafka. + public let useLargeIntegerValue: Bool? @inlinable - public init(broker: String? = nil, includeControlDetails: Bool? = nil, includeNullAndEmpty: Bool? = nil, includePartitionValue: Bool? = nil, includeTableAlterOperations: Bool? = nil, includeTransactionDetails: Bool? = nil, messageFormat: MessageFormatValue? = nil, messageMaxBytes: Int? = nil, noHexPrefix: Bool? = nil, partitionIncludeSchemaTable: Bool? = nil, saslMechanism: KafkaSaslMechanism? = nil, saslPassword: String? = nil, saslUsername: String? = nil, securityProtocol: KafkaSecurityProtocol? = nil, sslCaCertificateArn: String? = nil, sslClientCertificateArn: String? = nil, sslClientKeyArn: String? = nil, sslClientKeyPassword: String? = nil, sslEndpointIdentificationAlgorithm: KafkaSslEndpointIdentificationAlgorithm? = nil, topic: String? = nil) { + public init(broker: String? = nil, includeControlDetails: Bool? = nil, includeNullAndEmpty: Bool? = nil, includePartitionValue: Bool? = nil, includeTableAlterOperations: Bool? = nil, includeTransactionDetails: Bool? = nil, messageFormat: MessageFormatValue? = nil, messageMaxBytes: Int? = nil, noHexPrefix: Bool? = nil, partitionIncludeSchemaTable: Bool? = nil, saslMechanism: KafkaSaslMechanism? 
= nil, saslPassword: String? = nil, saslUsername: String? = nil, securityProtocol: KafkaSecurityProtocol? = nil, sslCaCertificateArn: String? = nil, sslClientCertificateArn: String? = nil, sslClientKeyArn: String? = nil, sslClientKeyPassword: String? = nil, sslEndpointIdentificationAlgorithm: KafkaSslEndpointIdentificationAlgorithm? = nil, topic: String? = nil, useLargeIntegerValue: Bool? = nil) { self.broker = broker self.includeControlDetails = includeControlDetails self.includeNullAndEmpty = includeNullAndEmpty @@ -5036,6 +5058,7 @@ extension DatabaseMigrationService { self.sslClientKeyPassword = sslClientKeyPassword self.sslEndpointIdentificationAlgorithm = sslEndpointIdentificationAlgorithm self.topic = topic + self.useLargeIntegerValue = useLargeIntegerValue } private enum CodingKeys: String, CodingKey { @@ -5059,6 +5082,29 @@ extension DatabaseMigrationService { case sslClientKeyPassword = "SslClientKeyPassword" case sslEndpointIdentificationAlgorithm = "SslEndpointIdentificationAlgorithm" case topic = "Topic" + case useLargeIntegerValue = "UseLargeIntegerValue" + } + } + + public struct KerberosAuthenticationSettings: AWSEncodableShape & AWSDecodableShape { + /// Specifies the Amazon Resource Name (ARN) of the IAM role that grants Amazon Web Services DMS access to the secret containing key cache file for the replication instance. + public let keyCacheSecretIamArn: String? + /// Specifies the secret ID of the key cache for the replication instance. + public let keyCacheSecretId: String? + /// Specifies the ID of the secret that stores the key cache file required for kerberos authentication of the replication instance. + public let krb5FileContents: String? + + @inlinable + public init(keyCacheSecretIamArn: String? = nil, keyCacheSecretId: String? = nil, krb5FileContents: String? 
= nil) { + self.keyCacheSecretIamArn = keyCacheSecretIamArn + self.keyCacheSecretId = keyCacheSecretId + self.krb5FileContents = krb5FileContents + } + + private enum CodingKeys: String, CodingKey { + case keyCacheSecretIamArn = "KeyCacheSecretIamArn" + case keyCacheSecretId = "KeyCacheSecretId" + case krb5FileContents = "Krb5FileContents" } } @@ -5083,9 +5129,11 @@ extension DatabaseMigrationService { public let serviceAccessRoleArn: String? /// The Amazon Resource Name (ARN) for the Amazon Kinesis Data Streams endpoint. public let streamArn: String? + /// Specifies using the large integer value with Kinesis. + public let useLargeIntegerValue: Bool? @inlinable - public init(includeControlDetails: Bool? = nil, includeNullAndEmpty: Bool? = nil, includePartitionValue: Bool? = nil, includeTableAlterOperations: Bool? = nil, includeTransactionDetails: Bool? = nil, messageFormat: MessageFormatValue? = nil, noHexPrefix: Bool? = nil, partitionIncludeSchemaTable: Bool? = nil, serviceAccessRoleArn: String? = nil, streamArn: String? = nil) { + public init(includeControlDetails: Bool? = nil, includeNullAndEmpty: Bool? = nil, includePartitionValue: Bool? = nil, includeTableAlterOperations: Bool? = nil, includeTransactionDetails: Bool? = nil, messageFormat: MessageFormatValue? = nil, noHexPrefix: Bool? = nil, partitionIncludeSchemaTable: Bool? = nil, serviceAccessRoleArn: String? = nil, streamArn: String? = nil, useLargeIntegerValue: Bool? 
= nil) { self.includeControlDetails = includeControlDetails self.includeNullAndEmpty = includeNullAndEmpty self.includePartitionValue = includePartitionValue @@ -5096,6 +5144,7 @@ extension DatabaseMigrationService { self.partitionIncludeSchemaTable = partitionIncludeSchemaTable self.serviceAccessRoleArn = serviceAccessRoleArn self.streamArn = streamArn + self.useLargeIntegerValue = useLargeIntegerValue } private enum CodingKeys: String, CodingKey { @@ -5109,6 +5158,7 @@ extension DatabaseMigrationService { case partitionIncludeSchemaTable = "PartitionIncludeSchemaTable" case serviceAccessRoleArn = "ServiceAccessRoleArn" case streamArn = "StreamArn" + case useLargeIntegerValue = "UseLargeIntegerValue" } } @@ -5205,6 +5255,8 @@ extension DatabaseMigrationService { } public struct MicrosoftSQLServerSettings: AWSEncodableShape & AWSDecodableShape { + /// Specifies using Kerberos authentication with Microsoft SQL Server. + public let authenticationMethod: SqlServerAuthenticationMethod? /// The maximum size of the packets (in bytes) used to transfer data using BCP. public let bcpPacketSize: Int? /// Specifies a file group for the DMS internal tables. When the replication task starts, all the internal DMS control tables (awsdms_ apply_exception, awsdms_apply, awsdms_changes) are created for the specified file group. @@ -5241,7 +5293,8 @@ extension DatabaseMigrationService { public let useThirdPartyBackupDevice: Bool? @inlinable - public init(bcpPacketSize: Int? = nil, controlTablesFileGroup: String? = nil, databaseName: String? = nil, forceLobLookup: Bool? = nil, password: String? = nil, port: Int? = nil, querySingleAlwaysOnNode: Bool? = nil, readBackupOnly: Bool? = nil, safeguardPolicy: SafeguardPolicy? = nil, secretsManagerAccessRoleArn: String? = nil, secretsManagerSecretId: String? = nil, serverName: String? = nil, tlogAccessMode: TlogAccessMode? = nil, trimSpaceInChar: Bool? = nil, useBcpFullLoad: Bool? = nil, username: String? 
= nil, useThirdPartyBackupDevice: Bool? = nil) { + public init(authenticationMethod: SqlServerAuthenticationMethod? = nil, bcpPacketSize: Int? = nil, controlTablesFileGroup: String? = nil, databaseName: String? = nil, forceLobLookup: Bool? = nil, password: String? = nil, port: Int? = nil, querySingleAlwaysOnNode: Bool? = nil, readBackupOnly: Bool? = nil, safeguardPolicy: SafeguardPolicy? = nil, secretsManagerAccessRoleArn: String? = nil, secretsManagerSecretId: String? = nil, serverName: String? = nil, tlogAccessMode: TlogAccessMode? = nil, trimSpaceInChar: Bool? = nil, useBcpFullLoad: Bool? = nil, username: String? = nil, useThirdPartyBackupDevice: Bool? = nil) { + self.authenticationMethod = authenticationMethod self.bcpPacketSize = bcpPacketSize self.controlTablesFileGroup = controlTablesFileGroup self.databaseName = databaseName @@ -5262,6 +5315,7 @@ extension DatabaseMigrationService { } private enum CodingKeys: String, CodingKey { + case authenticationMethod = "AuthenticationMethod" case bcpPacketSize = "BcpPacketSize" case controlTablesFileGroup = "ControlTablesFileGroup" case databaseName = "DatabaseName" @@ -5889,6 +5943,8 @@ extension DatabaseMigrationService { public let autoMinorVersionUpgrade: Bool? /// The engine version number of the replication instance. When modifying a major engine version of an instance, also set AllowMajorVersionUpgrade to true. public let engineVersion: String? + /// Specifies the ID of the secret that stores the key cache file required for kerberos authentication, when modifying a replication instance. + public let kerberosAuthenticationSettings: KerberosAuthenticationSettings? /// Specifies whether the replication instance is a Multi-AZ deployment. You can't set the AvailabilityZone parameter if the Multi-AZ parameter is set to true. public let multiAZ: Bool? /// The type of IP address protocol used by a replication instance, such as IPv4 only or Dual-stack that supports both IPv4 and IPv6 addressing. 
IPv6 only is not yet supported. @@ -5905,12 +5961,13 @@ extension DatabaseMigrationService { public let vpcSecurityGroupIds: [String]? @inlinable - public init(allocatedStorage: Int? = nil, allowMajorVersionUpgrade: Bool? = nil, applyImmediately: Bool? = nil, autoMinorVersionUpgrade: Bool? = nil, engineVersion: String? = nil, multiAZ: Bool? = nil, networkType: String? = nil, preferredMaintenanceWindow: String? = nil, replicationInstanceArn: String, replicationInstanceClass: String? = nil, replicationInstanceIdentifier: String? = nil, vpcSecurityGroupIds: [String]? = nil) { + public init(allocatedStorage: Int? = nil, allowMajorVersionUpgrade: Bool? = nil, applyImmediately: Bool? = nil, autoMinorVersionUpgrade: Bool? = nil, engineVersion: String? = nil, kerberosAuthenticationSettings: KerberosAuthenticationSettings? = nil, multiAZ: Bool? = nil, networkType: String? = nil, preferredMaintenanceWindow: String? = nil, replicationInstanceArn: String, replicationInstanceClass: String? = nil, replicationInstanceIdentifier: String? = nil, vpcSecurityGroupIds: [String]? 
= nil) { self.allocatedStorage = allocatedStorage self.allowMajorVersionUpgrade = allowMajorVersionUpgrade self.applyImmediately = applyImmediately self.autoMinorVersionUpgrade = autoMinorVersionUpgrade self.engineVersion = engineVersion + self.kerberosAuthenticationSettings = kerberosAuthenticationSettings self.multiAZ = multiAZ self.networkType = networkType self.preferredMaintenanceWindow = preferredMaintenanceWindow @@ -5920,12 +5977,17 @@ extension DatabaseMigrationService { self.vpcSecurityGroupIds = vpcSecurityGroupIds } + public func validate(name: String) throws { + try self.validate(self.replicationInstanceClass, name: "replicationInstanceClass", parent: name, max: 30) + } + private enum CodingKeys: String, CodingKey { case allocatedStorage = "AllocatedStorage" case allowMajorVersionUpgrade = "AllowMajorVersionUpgrade" case applyImmediately = "ApplyImmediately" case autoMinorVersionUpgrade = "AutoMinorVersionUpgrade" case engineVersion = "EngineVersion" + case kerberosAuthenticationSettings = "KerberosAuthenticationSettings" case multiAZ = "MultiAZ" case networkType = "NetworkType" case preferredMaintenanceWindow = "PreferredMaintenanceWindow" @@ -6389,7 +6451,7 @@ extension DatabaseMigrationService { public let allowSelectNestedTables: Bool? /// Specifies the ID of the destination for the archived redo logs. This value should be the same as a number in the dest_id column of the v$archived_log view. If you work with an additional redo log destination, use the AdditionalArchivedLogDestId option to specify the additional destination ID. Doing this improves performance by ensuring that the correct logs are accessed from the outset. public let archivedLogDestId: Int? - /// When this field is set to Y, DMS only accesses the archived redo logs. If the archived redo logs are stored on Automatic Storage Management (ASM) only, the DMS user account needs to be granted ASM privileges. + /// When this field is set to True, DMS only accesses the archived redo logs. 
If the archived redo logs are stored on Automatic Storage Management (ASM) only, the DMS user account needs to be granted ASM privileges. public let archivedLogsOnly: Bool? /// For an Oracle source endpoint, your Oracle Automatic Storage Management (ASM) password. You can set this value from the asm_user_password value. You set this value as part of the comma-separated value that you set to the Password request parameter when you create the endpoint to access transaction logs using Binary Reader. For more information, see Configuration for change data capture (CDC) on an Oracle source database. public let asmPassword: String? @@ -6397,6 +6459,8 @@ extension DatabaseMigrationService { public let asmServer: String? /// For an Oracle source endpoint, your ASM user name. You can set this value from the asm_user value. You set asm_user as part of the extra connection attribute string to access an Oracle server with Binary Reader that uses ASM. For more information, see Configuration for change data capture (CDC) on an Oracle source database. public let asmUser: String? + /// Specifies using Kerberos authentication with Oracle. + public let authenticationMethod: OracleAuthenticationMethod? /// Specifies whether the length of a character column is in bytes or in characters. To indicate that the character column length is in characters, set this attribute to CHAR. Otherwise, the character column length is in bytes. Example: charLengthSemantics=CHAR; public let charLengthSemantics: CharLengthSemantics? /// When true, converts timestamps with the timezone datatype to their UTC value. @@ -6415,7 +6479,7 @@ extension DatabaseMigrationService { public let failTasksOnLobTruncation: Bool? /// Specifies the number scale. You can select a scale up to 38, or you can select FLOAT. By default, the NUMBER data type is converted to precision 38, scale 10. Example: numberDataTypeScale=12 public let numberDatatypeScale: Int? 
- /// The timeframe in minutes to check for open transactions for a CDC-only task. You can specify an integer value between 0 (the default) and 240 (the maximum). This parameter is only valid in DMS version 3.5.0 and later. DMS supports a window of up to 9.5 hours including the value for OpenTransactionWindow. + /// The timeframe in minutes to check for open transactions for a CDC-only task. You can specify an integer value between 0 (the default) and 240 (the maximum). This parameter is only valid in DMS version 3.5.0 and later. public let openTransactionWindow: Int? /// Set this string attribute to the required value in order to use the Binary Reader to capture change data for an Amazon RDS for Oracle as the source. This value specifies the default Oracle root used to access the redo logs. public let oraclePathPrefix: String? @@ -6455,11 +6519,11 @@ extension DatabaseMigrationService { public let trimSpaceInChar: Bool? /// Set this attribute to true in order to use the Binary Reader to capture change data for an Amazon RDS for Oracle as the source. This tells the DMS instance to use any specified prefix replacement to access all online redo logs. public let useAlternateFolderForOnline: Bool? - /// Set this attribute to Y to capture change data using the Binary Reader utility. Set UseLogminerReader to N to set this attribute to Y. To use Binary Reader with Amazon RDS for Oracle as the source, you set additional attributes. For more information about using this setting with Oracle Automatic Storage Management (ASM), see Using Oracle LogMiner or DMS Binary Reader for CDC. + /// Set this attribute to True to capture change data using the Binary Reader utility. Set UseLogminerReader to False to set this attribute to True. To use Binary Reader with Amazon RDS for Oracle as the source, you set additional attributes. For more information about using this setting with Oracle Automatic Storage Management (ASM), see Using Oracle LogMiner or DMS Binary Reader for CDC. 
public let useBFile: Bool? - /// Set this attribute to Y to have DMS use a direct path full load. Specify this value to use the direct path protocol in the Oracle Call Interface (OCI). By using this OCI protocol, you can bulk-load Oracle target tables during a full load. + /// Set this attribute to True to have DMS use a direct path full load. Specify this value to use the direct path protocol in the Oracle Call Interface (OCI). By using this OCI protocol, you can bulk-load Oracle target tables during a full load. public let useDirectPathFullLoad: Bool? - /// Set this attribute to Y to capture change data using the Oracle LogMiner utility (the default). Set this attribute to N if you want to access the redo logs as a binary file. When you set UseLogminerReader to N, also set UseBfile to Y. For more information on this setting and using Oracle ASM, see Using Oracle LogMiner or DMS Binary Reader for CDC in the DMS User Guide. + /// Set this attribute to True to capture change data using the Oracle LogMiner utility (the default). Set this attribute to False if you want to access the redo logs as a binary file. When you set UseLogminerReader to False, also set UseBfile to True. For more information on this setting and using Oracle ASM, see Using Oracle LogMiner or DMS Binary Reader for CDC in the DMS User Guide. public let useLogminerReader: Bool? /// Set this string attribute to the required value in order to use the Binary Reader to capture change data for an Amazon RDS for Oracle as the source. This value specifies the path prefix used to replace the default Oracle root to access the redo logs. public let usePathPrefix: String? @@ -6467,7 +6531,7 @@ extension DatabaseMigrationService { public let username: String? @inlinable - public init(accessAlternateDirectly: Bool? = nil, additionalArchivedLogDestId: Int? = nil, addSupplementalLogging: Bool? = nil, allowSelectNestedTables: Bool? = nil, archivedLogDestId: Int? = nil, archivedLogsOnly: Bool? 
= nil, asmPassword: String? = nil, asmServer: String? = nil, asmUser: String? = nil, charLengthSemantics: CharLengthSemantics? = nil, convertTimestampWithZoneToUTC: Bool? = nil, databaseName: String? = nil, directPathNoLog: Bool? = nil, directPathParallelLoad: Bool? = nil, enableHomogenousTablespace: Bool? = nil, extraArchivedLogDestIds: [Int]? = nil, failTasksOnLobTruncation: Bool? = nil, numberDatatypeScale: Int? = nil, openTransactionWindow: Int? = nil, oraclePathPrefix: String? = nil, parallelAsmReadThreads: Int? = nil, password: String? = nil, port: Int? = nil, readAheadBlocks: Int? = nil, readTableSpaceName: Bool? = nil, replacePathPrefix: Bool? = nil, retryInterval: Int? = nil, secretsManagerAccessRoleArn: String? = nil, secretsManagerOracleAsmAccessRoleArn: String? = nil, secretsManagerOracleAsmSecretId: String? = nil, secretsManagerSecretId: String? = nil, securityDbEncryption: String? = nil, securityDbEncryptionName: String? = nil, serverName: String? = nil, spatialDataOptionToGeoJsonFunctionName: String? = nil, standbyDelayTime: Int? = nil, trimSpaceInChar: Bool? = nil, useAlternateFolderForOnline: Bool? = nil, useBFile: Bool? = nil, useDirectPathFullLoad: Bool? = nil, useLogminerReader: Bool? = nil, usePathPrefix: String? = nil, username: String? = nil) { + public init(accessAlternateDirectly: Bool? = nil, additionalArchivedLogDestId: Int? = nil, addSupplementalLogging: Bool? = nil, allowSelectNestedTables: Bool? = nil, archivedLogDestId: Int? = nil, archivedLogsOnly: Bool? = nil, asmPassword: String? = nil, asmServer: String? = nil, asmUser: String? = nil, authenticationMethod: OracleAuthenticationMethod? = nil, charLengthSemantics: CharLengthSemantics? = nil, convertTimestampWithZoneToUTC: Bool? = nil, databaseName: String? = nil, directPathNoLog: Bool? = nil, directPathParallelLoad: Bool? = nil, enableHomogenousTablespace: Bool? = nil, extraArchivedLogDestIds: [Int]? = nil, failTasksOnLobTruncation: Bool? = nil, numberDatatypeScale: Int? 
= nil, openTransactionWindow: Int? = nil, oraclePathPrefix: String? = nil, parallelAsmReadThreads: Int? = nil, password: String? = nil, port: Int? = nil, readAheadBlocks: Int? = nil, readTableSpaceName: Bool? = nil, replacePathPrefix: Bool? = nil, retryInterval: Int? = nil, secretsManagerAccessRoleArn: String? = nil, secretsManagerOracleAsmAccessRoleArn: String? = nil, secretsManagerOracleAsmSecretId: String? = nil, secretsManagerSecretId: String? = nil, securityDbEncryption: String? = nil, securityDbEncryptionName: String? = nil, serverName: String? = nil, spatialDataOptionToGeoJsonFunctionName: String? = nil, standbyDelayTime: Int? = nil, trimSpaceInChar: Bool? = nil, useAlternateFolderForOnline: Bool? = nil, useBFile: Bool? = nil, useDirectPathFullLoad: Bool? = nil, useLogminerReader: Bool? = nil, usePathPrefix: String? = nil, username: String? = nil) { self.accessAlternateDirectly = accessAlternateDirectly self.additionalArchivedLogDestId = additionalArchivedLogDestId self.addSupplementalLogging = addSupplementalLogging @@ -6477,6 +6541,7 @@ extension DatabaseMigrationService { self.asmPassword = asmPassword self.asmServer = asmServer self.asmUser = asmUser + self.authenticationMethod = authenticationMethod self.charLengthSemantics = charLengthSemantics self.convertTimestampWithZoneToUTC = convertTimestampWithZoneToUTC self.databaseName = databaseName @@ -6523,6 +6588,7 @@ extension DatabaseMigrationService { case asmPassword = "AsmPassword" case asmServer = "AsmServer" case asmUser = "AsmUser" + case authenticationMethod = "AuthenticationMethod" case charLengthSemantics = "CharLengthSemantics" case convertTimestampWithZoneToUTC = "ConvertTimestampWithZoneToUTC" case databaseName = "DatabaseName" @@ -6645,35 +6711,37 @@ extension DatabaseMigrationService { public let afterConnectScript: String? /// The Babelfish for Aurora PostgreSQL database name for the endpoint. public let babelfishDatabaseName: String? 
- /// To capture DDL events, DMS creates various artifacts in the PostgreSQL database when the task starts. You can later remove these artifacts. If this value is set to N, you don't have to create tables or triggers on the source database. + /// To capture DDL events, DMS creates various artifacts in the PostgreSQL database when the task starts. You can later remove these artifacts. The default value is true. If this value is set to N, you don't have to create tables or triggers on the source database. public let captureDdls: Bool? /// Specifies the default behavior of the replication's handling of PostgreSQL- compatible endpoints that require some additional configuration, such as Babelfish endpoints. public let databaseMode: DatabaseMode? /// Database name for the endpoint. public let databaseName: String? - /// The schema in which the operational DDL database artifacts are created. Example: ddlArtifactsSchema=xyzddlschema; + /// The schema in which the operational DDL database artifacts are created. The default value is public. Example: ddlArtifactsSchema=xyzddlschema; public let ddlArtifactsSchema: String? + /// Disables the Unicode source filter with PostgreSQL, for values passed into the Selection rule filter on Source Endpoint column values. By default DMS performs source filter comparisons using a Unicode string which can cause look ups to ignore the indexes in the text columns and slow down migrations. Unicode support should only be disabled when using a selection rule filter is on a text column in the Source database that is indexed. + public let disableUnicodeSourceFilter: Bool? /// Sets the client statement timeout for the PostgreSQL instance, in seconds. The default value is 60 seconds. Example: executeTimeout=100; public let executeTimeout: Int? - /// When set to true, this value causes a task to fail if the actual size of a LOB column is greater than the specified LobMaxSize. 
If task is set to Limited LOB mode and this option is set to true, the task fails instead of truncating the LOB data. + /// When set to true, this value causes a task to fail if the actual size of a LOB column is greater than the specified LobMaxSize. The default value is false. If task is set to Limited LOB mode and this option is set to true, the task fails instead of truncating the LOB data. public let failTasksOnLobTruncation: Bool? - /// The write-ahead log (WAL) heartbeat feature mimics a dummy transaction. By doing this, it prevents idle logical replication slots from holding onto old WAL logs, which can result in storage full situations on the source. This heartbeat keeps restart_lsn moving and prevents storage full scenarios. + /// The write-ahead log (WAL) heartbeat feature mimics a dummy transaction. By doing this, it prevents idle logical replication slots from holding onto old WAL logs, which can result in storage full situations on the source. This heartbeat keeps restart_lsn moving and prevents storage full scenarios. The default value is false. public let heartbeatEnable: Bool? - /// Sets the WAL heartbeat frequency (in minutes). + /// Sets the WAL heartbeat frequency (in minutes). The default value is 5 minutes. public let heartbeatFrequency: Int? - /// Sets the schema in which the heartbeat artifacts are created. + /// Sets the schema in which the heartbeat artifacts are created. The default value is public. public let heartbeatSchema: String? - /// When true, lets PostgreSQL migrate the boolean type as boolean. By default, PostgreSQL migrates booleans as varchar(5). You must set this setting on both the source and target endpoints for it to take effect. + /// When true, lets PostgreSQL migrate the boolean type as boolean. By default, PostgreSQL migrates booleans as varchar(5). You must set this setting on both the source and target endpoints for it to take effect. The default value is false. public let mapBooleanAsBoolean: Bool? 
- /// When true, DMS migrates JSONB values as CLOB. + /// When true, DMS migrates JSONB values as CLOB. The default value is false. public let mapJsonbAsClob: Bool? - /// When true, DMS migrates LONG values as VARCHAR. + /// Sets what datatype to map LONG values as. The default value is wstring. public let mapLongVarcharAs: LongVarcharMappingType? - /// Specifies the maximum size (in KB) of any .csv file used to transfer data to PostgreSQL. Example: maxFileSize=512 + /// Specifies the maximum size (in KB) of any .csv file used to transfer data to PostgreSQL. The default value is 32,768 KB (32 MB). Example: maxFileSize=512 public let maxFileSize: Int? /// Endpoint connection password. public let password: String? - /// Specifies the plugin to use to create a replication slot. + /// Specifies the plugin to use to create a replication slot. The default value is pglogical. public let pluginName: PluginNameValue? /// Endpoint TCP port. The default is 5432. public let port: Int? @@ -6691,13 +6759,14 @@ extension DatabaseMigrationService { public let username: String? @inlinable - public init(afterConnectScript: String? = nil, babelfishDatabaseName: String? = nil, captureDdls: Bool? = nil, databaseMode: DatabaseMode? = nil, databaseName: String? = nil, ddlArtifactsSchema: String? = nil, executeTimeout: Int? = nil, failTasksOnLobTruncation: Bool? = nil, heartbeatEnable: Bool? = nil, heartbeatFrequency: Int? = nil, heartbeatSchema: String? = nil, mapBooleanAsBoolean: Bool? = nil, mapJsonbAsClob: Bool? = nil, mapLongVarcharAs: LongVarcharMappingType? = nil, maxFileSize: Int? = nil, password: String? = nil, pluginName: PluginNameValue? = nil, port: Int? = nil, secretsManagerAccessRoleArn: String? = nil, secretsManagerSecretId: String? = nil, serverName: String? = nil, slotName: String? = nil, trimSpaceInChar: Bool? = nil, username: String? = nil) { + public init(afterConnectScript: String? = nil, babelfishDatabaseName: String? = nil, captureDdls: Bool? 
= nil, databaseMode: DatabaseMode? = nil, databaseName: String? = nil, ddlArtifactsSchema: String? = nil, disableUnicodeSourceFilter: Bool? = nil, executeTimeout: Int? = nil, failTasksOnLobTruncation: Bool? = nil, heartbeatEnable: Bool? = nil, heartbeatFrequency: Int? = nil, heartbeatSchema: String? = nil, mapBooleanAsBoolean: Bool? = nil, mapJsonbAsClob: Bool? = nil, mapLongVarcharAs: LongVarcharMappingType? = nil, maxFileSize: Int? = nil, password: String? = nil, pluginName: PluginNameValue? = nil, port: Int? = nil, secretsManagerAccessRoleArn: String? = nil, secretsManagerSecretId: String? = nil, serverName: String? = nil, slotName: String? = nil, trimSpaceInChar: Bool? = nil, username: String? = nil) { self.afterConnectScript = afterConnectScript self.babelfishDatabaseName = babelfishDatabaseName self.captureDdls = captureDdls self.databaseMode = databaseMode self.databaseName = databaseName self.ddlArtifactsSchema = ddlArtifactsSchema + self.disableUnicodeSourceFilter = disableUnicodeSourceFilter self.executeTimeout = executeTimeout self.failTasksOnLobTruncation = failTasksOnLobTruncation self.heartbeatEnable = heartbeatEnable @@ -6725,6 +6794,7 @@ extension DatabaseMigrationService { case databaseMode = "DatabaseMode" case databaseName = "DatabaseName" case ddlArtifactsSchema = "DdlArtifactsSchema" + case disableUnicodeSourceFilter = "DisableUnicodeSourceFilter" case executeTimeout = "ExecuteTimeout" case failTasksOnLobTruncation = "FailTasksOnLobTruncation" case heartbeatEnable = "HeartbeatEnable" @@ -7399,7 +7469,7 @@ extension DatabaseMigrationService { public let replicationUpdateTime: Date? /// The Amazon Resource Name for an existing Endpoint the serverless replication uses for its data source. public let sourceEndpointArn: String? - /// The replication type. + /// The type of replication to start. public let startReplicationType: String? /// The current status of the serverless replication. public let status: String? 
@@ -7523,6 +7593,8 @@ extension DatabaseMigrationService { public let freeUntil: Date? /// The time the replication instance was created. public let instanceCreateTime: Date? + /// Specifies the ID of the secret that stores the key cache file required for kerberos authentication, when replicating an instance. + public let kerberosAuthenticationSettings: KerberosAuthenticationSettings? /// An KMS key identifier that is used to encrypt the data on the replication instance. If you don't specify a value for the KmsKeyId parameter, then DMS uses your default encryption key. KMS creates the default encryption key for your Amazon Web Services account. Your Amazon Web Services account has a different default encryption key for each Amazon Web Services Region. public let kmsKeyId: String? /// Specifies whether the replication instance is a Multi-AZ deployment. You can't set the AvailabilityZone parameter if the Multi-AZ parameter is set to true. @@ -7561,7 +7633,7 @@ extension DatabaseMigrationService { public let vpcSecurityGroups: [VpcSecurityGroupMembership]? @inlinable - public init(allocatedStorage: Int? = nil, autoMinorVersionUpgrade: Bool? = nil, availabilityZone: String? = nil, dnsNameServers: String? = nil, engineVersion: String? = nil, freeUntil: Date? = nil, instanceCreateTime: Date? = nil, kmsKeyId: String? = nil, multiAZ: Bool? = nil, networkType: String? = nil, pendingModifiedValues: ReplicationPendingModifiedValues? = nil, preferredMaintenanceWindow: String? = nil, publiclyAccessible: Bool? = nil, replicationInstanceArn: String? = nil, replicationInstanceClass: String? = nil, replicationInstanceIdentifier: String? = nil, replicationInstanceIpv6Addresses: [String]? = nil, replicationInstancePrivateIpAddresses: [String]? = nil, replicationInstancePublicIpAddresses: [String]? = nil, replicationInstanceStatus: String? = nil, replicationSubnetGroup: ReplicationSubnetGroup? = nil, secondaryAvailabilityZone: String? 
= nil, vpcSecurityGroups: [VpcSecurityGroupMembership]? = nil) { + public init(allocatedStorage: Int? = nil, autoMinorVersionUpgrade: Bool? = nil, availabilityZone: String? = nil, dnsNameServers: String? = nil, engineVersion: String? = nil, freeUntil: Date? = nil, instanceCreateTime: Date? = nil, kerberosAuthenticationSettings: KerberosAuthenticationSettings? = nil, kmsKeyId: String? = nil, multiAZ: Bool? = nil, networkType: String? = nil, pendingModifiedValues: ReplicationPendingModifiedValues? = nil, preferredMaintenanceWindow: String? = nil, publiclyAccessible: Bool? = nil, replicationInstanceArn: String? = nil, replicationInstanceClass: String? = nil, replicationInstanceIdentifier: String? = nil, replicationInstanceIpv6Addresses: [String]? = nil, replicationInstancePrivateIpAddresses: [String]? = nil, replicationInstancePublicIpAddresses: [String]? = nil, replicationInstanceStatus: String? = nil, replicationSubnetGroup: ReplicationSubnetGroup? = nil, secondaryAvailabilityZone: String? = nil, vpcSecurityGroups: [VpcSecurityGroupMembership]? = nil) { self.allocatedStorage = allocatedStorage self.autoMinorVersionUpgrade = autoMinorVersionUpgrade self.availabilityZone = availabilityZone @@ -7569,6 +7641,7 @@ extension DatabaseMigrationService { self.engineVersion = engineVersion self.freeUntil = freeUntil self.instanceCreateTime = instanceCreateTime + self.kerberosAuthenticationSettings = kerberosAuthenticationSettings self.kmsKeyId = kmsKeyId self.multiAZ = multiAZ self.networkType = networkType @@ -7591,7 +7664,7 @@ extension DatabaseMigrationService { @available(*, deprecated, message: "Members replicationInstancePrivateIpAddress, replicationInstancePublicIpAddress have been deprecated") @inlinable - public init(allocatedStorage: Int? = nil, autoMinorVersionUpgrade: Bool? = nil, availabilityZone: String? = nil, dnsNameServers: String? = nil, engineVersion: String? = nil, freeUntil: Date? = nil, instanceCreateTime: Date? = nil, kmsKeyId: String? 
= nil, multiAZ: Bool? = nil, networkType: String? = nil, pendingModifiedValues: ReplicationPendingModifiedValues? = nil, preferredMaintenanceWindow: String? = nil, publiclyAccessible: Bool? = nil, replicationInstanceArn: String? = nil, replicationInstanceClass: String? = nil, replicationInstanceIdentifier: String? = nil, replicationInstanceIpv6Addresses: [String]? = nil, replicationInstancePrivateIpAddress: String? = nil, replicationInstancePrivateIpAddresses: [String]? = nil, replicationInstancePublicIpAddress: String? = nil, replicationInstancePublicIpAddresses: [String]? = nil, replicationInstanceStatus: String? = nil, replicationSubnetGroup: ReplicationSubnetGroup? = nil, secondaryAvailabilityZone: String? = nil, vpcSecurityGroups: [VpcSecurityGroupMembership]? = nil) { + public init(allocatedStorage: Int? = nil, autoMinorVersionUpgrade: Bool? = nil, availabilityZone: String? = nil, dnsNameServers: String? = nil, engineVersion: String? = nil, freeUntil: Date? = nil, instanceCreateTime: Date? = nil, kerberosAuthenticationSettings: KerberosAuthenticationSettings? = nil, kmsKeyId: String? = nil, multiAZ: Bool? = nil, networkType: String? = nil, pendingModifiedValues: ReplicationPendingModifiedValues? = nil, preferredMaintenanceWindow: String? = nil, publiclyAccessible: Bool? = nil, replicationInstanceArn: String? = nil, replicationInstanceClass: String? = nil, replicationInstanceIdentifier: String? = nil, replicationInstanceIpv6Addresses: [String]? = nil, replicationInstancePrivateIpAddress: String? = nil, replicationInstancePrivateIpAddresses: [String]? = nil, replicationInstancePublicIpAddress: String? = nil, replicationInstancePublicIpAddresses: [String]? = nil, replicationInstanceStatus: String? = nil, replicationSubnetGroup: ReplicationSubnetGroup? = nil, secondaryAvailabilityZone: String? = nil, vpcSecurityGroups: [VpcSecurityGroupMembership]? 
= nil) { self.allocatedStorage = allocatedStorage self.autoMinorVersionUpgrade = autoMinorVersionUpgrade self.availabilityZone = availabilityZone @@ -7599,6 +7672,7 @@ extension DatabaseMigrationService { self.engineVersion = engineVersion self.freeUntil = freeUntil self.instanceCreateTime = instanceCreateTime + self.kerberosAuthenticationSettings = kerberosAuthenticationSettings self.kmsKeyId = kmsKeyId self.multiAZ = multiAZ self.networkType = networkType @@ -7627,6 +7701,7 @@ extension DatabaseMigrationService { case engineVersion = "EngineVersion" case freeUntil = "FreeUntil" case instanceCreateTime = "InstanceCreateTime" + case kerberosAuthenticationSettings = "KerberosAuthenticationSettings" case kmsKeyId = "KmsKeyId" case multiAZ = "MultiAZ" case networkType = "NetworkType" @@ -7817,7 +7892,7 @@ extension DatabaseMigrationService { public let sourceEndpointArn: String? /// The status of the replication task. This response parameter can return one of the following values: "moving" – The task is being moved in response to running the MoveReplicationTask operation. "creating" – The task is being created in response to running the CreateReplicationTask operation. "deleting" – The task is being deleted in response to running the DeleteReplicationTask operation. "failed" – The task failed to successfully complete the database migration in response to running the StartReplicationTask operation. "failed-move" – The task failed to move in response to running the MoveReplicationTask operation. "modifying" – The task definition is being modified in response to running the ModifyReplicationTask operation. "ready" – The task is in a ready state where it can respond to other task operations, such as StartReplicationTask or DeleteReplicationTask . "running" – The task is performing a database migration in response to running the StartReplicationTask operation. 
"starting" – The task is preparing to perform a database migration in response to running the StartReplicationTask operation. "stopped" – The task has stopped in response to running the StopReplicationTask operation. "stopping" – The task is preparing to stop in response to running the StopReplicationTask operation. "testing" – The database migration specified for this task is being tested in response to running either the StartReplicationTaskAssessmentRun or the StartReplicationTaskAssessment operation. StartReplicationTaskAssessmentRun is an improved premigration task assessment operation. The StartReplicationTaskAssessment operation assesses data type compatibility only between the source and target database of a given migration task. In contrast, StartReplicationTaskAssessmentRun enables you to specify a variety of premigration task assessments in addition to data type compatibility. These assessments include ones for the validity of primary key definitions and likely issues with database migration performance, among others. public let status: String? - /// The reason the replication task was stopped. This response parameter can return one of the following values: "Stop Reason NORMAL" "Stop Reason RECOVERABLE_ERROR" "Stop Reason FATAL_ERROR" "Stop Reason FULL_LOAD_ONLY_FINISHED" "Stop Reason STOPPED_AFTER_FULL_LOAD" – Full load completed, with cached changes not applied "Stop Reason STOPPED_AFTER_CACHED_EVENTS" – Full load completed, with cached changes applied "Stop Reason EXPRESS_LICENSE_LIMITS_REACHED" "Stop Reason STOPPED_AFTER_DDL_APPLY" – User-defined stop task after DDL applied "Stop Reason STOPPED_DUE_TO_LOW_MEMORY" "Stop Reason STOPPED_DUE_TO_LOW_DISK" "Stop Reason STOPPED_AT_SERVER_TIME" – User-defined server time for stopping task "Stop Reason STOPPED_AT_COMMIT_TIME" – User-defined commit time for stopping task "Stop Reason RECONFIGURATION_RESTART" "Stop Reason RECYCLE_TASK" + /// The reason the replication task was stopped. 
This response parameter can return one of the following values: "Stop Reason NORMAL" – The task completed successfully with no additional information returned. "Stop Reason RECOVERABLE_ERROR" "Stop Reason FATAL_ERROR" "Stop Reason FULL_LOAD_ONLY_FINISHED" – The task completed the full load phase. DMS applied cached changes if you set StopTaskCachedChangesApplied to true. "Stop Reason STOPPED_AFTER_FULL_LOAD" – Full load completed, with cached changes not applied "Stop Reason STOPPED_AFTER_CACHED_EVENTS" – Full load completed, with cached changes applied "Stop Reason EXPRESS_LICENSE_LIMITS_REACHED" "Stop Reason STOPPED_AFTER_DDL_APPLY" – User-defined stop task after DDL applied "Stop Reason STOPPED_DUE_TO_LOW_MEMORY" "Stop Reason STOPPED_DUE_TO_LOW_DISK" "Stop Reason STOPPED_AT_SERVER_TIME" – User-defined server time for stopping task "Stop Reason STOPPED_AT_COMMIT_TIME" – User-defined commit time for stopping task "Stop Reason RECONFIGURATION_RESTART" "Stop Reason RECYCLE_TASK" public let stopReason: String? /// Table mappings specified in the task. public let tableMappings: String? @@ -7939,7 +8014,7 @@ extension DatabaseMigrationService { public let resultStatistic: ReplicationTaskAssessmentRunResultStatistic? /// ARN of the service role used to start the assessment run using the StartReplicationTaskAssessmentRun operation. The role must allow the iam:PassRole action. public let serviceAccessRoleArn: String? - /// Assessment run status. This status can have one of the following values: "cancelling" – The assessment run was canceled by the CancelReplicationTaskAssessmentRun operation. "deleting" – The assessment run was deleted by the DeleteReplicationTaskAssessmentRun operation. "failed" – At least one individual assessment completed with a failed status. "error-provisioning" – An internal error occurred while resources were provisioned (during provisioning status). 
"error-executing" – An internal error occurred while individual assessments ran (during running status). "invalid state" – The assessment run is in an unknown state. "passed" – All individual assessments have completed, and none has a failed status. "provisioning" – Resources required to run individual assessments are being provisioned. "running" – Individual assessments are being run. "starting" – The assessment run is starting, but resources are not yet being provisioned for individual assessments. + /// Assessment run status. This status can have one of the following values: "cancelling" – The assessment run was canceled by the CancelReplicationTaskAssessmentRun operation. "deleting" – The assessment run was deleted by the DeleteReplicationTaskAssessmentRun operation. "failed" – At least one individual assessment completed with a failed status. "error-provisioning" – An internal error occurred while resources were provisioned (during provisioning status). "error-executing" – An internal error occurred while individual assessments ran (during running status). "invalid state" – The assessment run is in an unknown state. "passed" – All individual assessments have completed, and none has a failed status. "provisioning" – Resources required to run individual assessments are being provisioned. "running" – Individual assessments are being run. "starting" – The assessment run is starting, but resources are not yet being provisioned for individual assessments. "warning" – At least one individual assessment completed with a warning status. public let status: String? @inlinable @@ -8776,7 +8851,7 @@ extension DatabaseMigrationService { public let cdcStopPosition: String? /// The Amazon Resource Name of the replication for which to start replication. public let replicationConfigArn: String - /// The replication type. + /// The replication type. 
When the replication type is full-load or full-load-and-cdc, the only valid value for the first run of the replication is start-replication. This option will start the replication. You can also use ReloadTables to reload specific tables that failed during replication instead of restarting the replication. The resume-processing option isn't applicable for a full-load replication, because you can't resume partially loaded tables during the full load phase. For a full-load-and-cdc replication, DMS migrates table data, and then applies data changes that occur on the source. To load all the tables again, and start capturing source changes, use reload-target. Otherwise use resume-processing, to replicate the changes from the last stop position. public let startReplicationType: String @inlinable diff --git a/Sources/Soto/Services/DynamoDBStreams/DynamoDBStreams_api.swift b/Sources/Soto/Services/DynamoDBStreams/DynamoDBStreams_api.swift index 0c1fb33121..f758c07fb6 100644 --- a/Sources/Soto/Services/DynamoDBStreams/DynamoDBStreams_api.swift +++ b/Sources/Soto/Services/DynamoDBStreams/DynamoDBStreams_api.swift @@ -88,17 +88,8 @@ public struct DynamoDBStreams: AWSService { /// FIPS and dualstack endpoints static var variantEndpoints: [EndpointVariantType: AWSServiceConfig.EndpointVariant] {[ [.fips]: .init(endpoints: [ - "ca-central-1": "streams.dynamodb-fips.ca-central-1.amazonaws.com", - "ca-west-1": "streams.dynamodb-fips.ca-west-1.amazonaws.com", - "us-east-1": "streams.dynamodb-fips.us-east-1.amazonaws.com", - "us-east-2": "streams.dynamodb-fips.us-east-2.amazonaws.com", - "us-gov-east-1": "streams.dynamodb-fips.us-gov-east-1.amazonaws.com", - "us-gov-west-1": "streams.dynamodb-fips.us-gov-west-1.amazonaws.com", - "us-iso-east-1": "streams.dynamodb-fips.us-iso-east-1.c2s.ic.gov", - "us-iso-west-1": "streams.dynamodb-fips.us-iso-west-1.c2s.ic.gov", - "us-isob-east-1": "streams.dynamodb-fips.us-isob-east-1.sc2s.sgov.gov", - "us-west-1": 
"streams.dynamodb-fips.us-west-1.amazonaws.com", - "us-west-2": "streams.dynamodb-fips.us-west-2.amazonaws.com" + "us-gov-east-1": "streams.dynamodb.us-gov-east-1.amazonaws.com", + "us-gov-west-1": "streams.dynamodb.us-gov-west-1.amazonaws.com" ]) ]} diff --git a/Sources/Soto/Services/EC2/EC2_api.swift b/Sources/Soto/Services/EC2/EC2_api.swift index 45ea48080a..9305ff6f95 100644 --- a/Sources/Soto/Services/EC2/EC2_api.swift +++ b/Sources/Soto/Services/EC2/EC2_api.swift @@ -4888,7 +4888,7 @@ public struct EC2: AWSService { return try await self.createSecurityGroup(input, logger: logger) } - /// Creates a snapshot of an EBS volume and stores it in Amazon S3. You can use snapshots for backups, to make copies of EBS volumes, and to save data before shutting down an instance. You can create snapshots of volumes in a Region and volumes on an Outpost. If you create a snapshot of a volume in a Region, the snapshot must be stored in the same Region as the volume. If you create a snapshot of a volume on an Outpost, the snapshot can be stored on the same Outpost as the volume, or in the Region for that Outpost. When a snapshot is created, any Amazon Web Services Marketplace product codes that are associated with the source volume are propagated to the snapshot. You can take a snapshot of an attached volume that is in use. However, snapshots only capture data that has been written to your Amazon EBS volume at the time the snapshot command is issued; this might exclude any data that has been cached by any applications or the operating system. If you can pause any file systems on the volume long enough to take a snapshot, your snapshot should be complete. However, if you cannot pause all file writes to the volume, you should unmount the volume from within the instance, issue the snapshot command, and then remount the volume to ensure a consistent and complete snapshot. You may remount and use your volume while the snapshot status is pending. 
When you create a snapshot for an EBS volume that serves as a root device, we recommend that you stop the instance before taking the snapshot. Snapshots that are taken from encrypted volumes are automatically encrypted. Volumes that are created from encrypted snapshots are also automatically encrypted. Your encrypted volumes and any associated snapshots always remain protected. You can tag your snapshots during creation. For more information, see Tag your Amazon EC2 resources in the Amazon EC2 User Guide. For more information, see Amazon EBS and Amazon EBS encryption in the Amazon EBS User Guide. + /// Creates a snapshot of an EBS volume and stores it in Amazon S3. You can use snapshots for backups, to make copies of EBS volumes, and to save data before shutting down an instance. The location of the source EBS volume determines where you can create the snapshot. If the source volume is in a Region, you must create the snapshot in the same Region as the volume. If the source volume is in a Local Zone, you can create the snapshot in the same Local Zone or in parent Amazon Web Services Region. If the source volume is on an Outpost, you can create the snapshot on the same Outpost or in its parent Amazon Web Services Region. When a snapshot is created, any Amazon Web Services Marketplace product codes that are associated with the source volume are propagated to the snapshot. You can take a snapshot of an attached volume that is in use. However, snapshots only capture data that has been written to your Amazon EBS volume at the time the snapshot command is issued; this might exclude any data that has been cached by any applications or the operating system. If you can pause any file systems on the volume long enough to take a snapshot, your snapshot should be complete. 
However, if you cannot pause all file writes to the volume, you should unmount the volume from within the instance, issue the snapshot command, and then remount the volume to ensure a consistent and complete snapshot. You may remount and use your volume while the snapshot status is pending. When you create a snapshot for an EBS volume that serves as a root device, we recommend that you stop the instance before taking the snapshot. Snapshots that are taken from encrypted volumes are automatically encrypted. Volumes that are created from encrypted snapshots are also automatically encrypted. Your encrypted volumes and any associated snapshots always remain protected. For more information, Amazon EBS encryption in the Amazon EBS User Guide. @Sendable @inlinable public func createSnapshot(_ input: CreateSnapshotRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> Snapshot { @@ -4901,12 +4901,13 @@ public struct EC2: AWSService { logger: logger ) } - /// Creates a snapshot of an EBS volume and stores it in Amazon S3. You can use snapshots for backups, to make copies of EBS volumes, and to save data before shutting down an instance. You can create snapshots of volumes in a Region and volumes on an Outpost. If you create a snapshot of a volume in a Region, the snapshot must be stored in the same Region as the volume. If you create a snapshot of a volume on an Outpost, the snapshot can be stored on the same Outpost as the volume, or in the Region for that Outpost. When a snapshot is created, any Amazon Web Services Marketplace product codes that are associated with the source volume are propagated to the snapshot. You can take a snapshot of an attached volume that is in use. However, snapshots only capture data that has been written to your Amazon EBS volume at the time the snapshot command is issued; this might exclude any data that has been cached by any applications or the operating system. 
If you can pause any file systems on the volume long enough to take a snapshot, your snapshot should be complete. However, if you cannot pause all file writes to the volume, you should unmount the volume from within the instance, issue the snapshot command, and then remount the volume to ensure a consistent and complete snapshot. You may remount and use your volume while the snapshot status is pending. When you create a snapshot for an EBS volume that serves as a root device, we recommend that you stop the instance before taking the snapshot. Snapshots that are taken from encrypted volumes are automatically encrypted. Volumes that are created from encrypted snapshots are also automatically encrypted. Your encrypted volumes and any associated snapshots always remain protected. You can tag your snapshots during creation. For more information, see Tag your Amazon EC2 resources in the Amazon EC2 User Guide. For more information, see Amazon EBS and Amazon EBS encryption in the Amazon EBS User Guide. + /// Creates a snapshot of an EBS volume and stores it in Amazon S3. You can use snapshots for backups, to make copies of EBS volumes, and to save data before shutting down an instance. The location of the source EBS volume determines where you can create the snapshot. If the source volume is in a Region, you must create the snapshot in the same Region as the volume. If the source volume is in a Local Zone, you can create the snapshot in the same Local Zone or in parent Amazon Web Services Region. If the source volume is on an Outpost, you can create the snapshot on the same Outpost or in its parent Amazon Web Services Region. When a snapshot is created, any Amazon Web Services Marketplace product codes that are associated with the source volume are propagated to the snapshot. You can take a snapshot of an attached volume that is in use. 
However, snapshots only capture data that has been written to your Amazon EBS volume at the time the snapshot command is issued; this might exclude any data that has been cached by any applications or the operating system. If you can pause any file systems on the volume long enough to take a snapshot, your snapshot should be complete. However, if you cannot pause all file writes to the volume, you should unmount the volume from within the instance, issue the snapshot command, and then remount the volume to ensure a consistent and complete snapshot. You may remount and use your volume while the snapshot status is pending. When you create a snapshot for an EBS volume that serves as a root device, we recommend that you stop the instance before taking the snapshot. Snapshots that are taken from encrypted volumes are automatically encrypted. Volumes that are created from encrypted snapshots are also automatically encrypted. Your encrypted volumes and any associated snapshots always remain protected. For more information, Amazon EBS encryption in the Amazon EBS User Guide. /// /// Parameters: /// - description: A description for the snapshot. /// - dryRun: Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. - /// - outpostArn: The Amazon Resource Name (ARN) of the Outpost on which to create a local snapshot. To create a snapshot of a volume in a Region, omit this parameter. The snapshot is created in the same Region as the volume. To create a snapshot of a volume on an Outpost and store the snapshot in the Region, omit this parameter. The snapshot is created in the Region for the Outpost. To create a snapshot of a volume on an Outpost and store the snapshot on an Outpost, specify the ARN of the destination Outpost. 
The snapshot must be created on the same Outpost as the volume. For more information, see Create local snapshots from volumes on an Outpost in the Amazon EBS User Guide. + /// - location: Only supported for volumes in Local Zones. If the source volume is not in a Local Zone, omit this parameter. To create a local snapshot in the same Local Zone as the source volume, specify local. To create a regional snapshot in the parent Region of the Local Zone, specify regional or omit this parameter. Default value: regional + /// - outpostArn: Only supported for volumes on Outposts. If the source volume is not on an Outpost, omit this parameter. To create the snapshot on the same Outpost as the source volume, specify the ARN of that Outpost. The snapshot must be created on the same Outpost as the volume. To create the snapshot in the parent Region of the Outpost, omit this parameter. For more information, see Create local snapshots from volumes on an Outpost in the Amazon EBS User Guide. /// - tagSpecifications: The tags to apply to the snapshot during creation. /// - volumeId: The ID of the Amazon EBS volume. /// - logger: Logger use during operation @@ -4914,6 +4915,7 @@ public struct EC2: AWSService { public func createSnapshot( description: String? = nil, dryRun: Bool? = nil, + location: SnapshotLocationEnum? = nil, outpostArn: String? = nil, tagSpecifications: [TagSpecification]? = nil, volumeId: String? = nil, @@ -4922,6 +4924,7 @@ public struct EC2: AWSService { let input = CreateSnapshotRequest( description: description, dryRun: dryRun, + location: location, outpostArn: outpostArn, tagSpecifications: tagSpecifications, volumeId: volumeId @@ -4929,7 +4932,7 @@ public struct EC2: AWSService { return try await self.createSnapshot(input, logger: logger) } - /// Creates crash-consistent snapshots of multiple EBS volumes and stores the data in S3. Volumes are chosen by specifying an instance. 
Any attached volumes will produce one snapshot each that is crash-consistent across the instance. You can include all of the volumes currently attached to the instance, or you can exclude the root volume or specific data (non-root) volumes from the multi-volume snapshot set. You can create multi-volume snapshots of instances in a Region and instances on an Outpost. If you create snapshots from an instance in a Region, the snapshots must be stored in the same Region as the instance. If you create snapshots from an instance on an Outpost, the snapshots can be stored on the same Outpost as the instance, or in the Region for that Outpost. + /// Creates crash-consistent snapshots of multiple EBS volumes attached to an Amazon EC2 instance. Volumes are chosen by specifying an instance. Each volume attached to the specified instance will produce one snapshot that is crash-consistent across the instance. You can include all of the volumes currently attached to the instance, or you can exclude the root volume or specific data (non-root) volumes from the multi-volume snapshot set. The location of the source instance determines where you can create the snapshots. If the source instance is in a Region, you must create the snapshots in the same Region as the instance. If the source instance is in a Local Zone, you can create the snapshots in the same Local Zone or in parent Amazon Web Services Region. If the source instance is on an Outpost, you can create the snapshots on the same Outpost or in its parent Amazon Web Services Region. @Sendable @inlinable public func createSnapshots(_ input: CreateSnapshotsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateSnapshotsResult { @@ -4942,14 +4945,15 @@ public struct EC2: AWSService { logger: logger ) } - /// Creates crash-consistent snapshots of multiple EBS volumes and stores the data in S3. Volumes are chosen by specifying an instance. 
Any attached volumes will produce one snapshot each that is crash-consistent across the instance. You can include all of the volumes currently attached to the instance, or you can exclude the root volume or specific data (non-root) volumes from the multi-volume snapshot set. You can create multi-volume snapshots of instances in a Region and instances on an Outpost. If you create snapshots from an instance in a Region, the snapshots must be stored in the same Region as the instance. If you create snapshots from an instance on an Outpost, the snapshots can be stored on the same Outpost as the instance, or in the Region for that Outpost. + /// Creates crash-consistent snapshots of multiple EBS volumes attached to an Amazon EC2 instance. Volumes are chosen by specifying an instance. Each volume attached to the specified instance will produce one snapshot that is crash-consistent across the instance. You can include all of the volumes currently attached to the instance, or you can exclude the root volume or specific data (non-root) volumes from the multi-volume snapshot set. The location of the source instance determines where you can create the snapshots. If the source instance is in a Region, you must create the snapshots in the same Region as the instance. If the source instance is in a Local Zone, you can create the snapshots in the same Local Zone or in parent Amazon Web Services Region. If the source instance is on an Outpost, you can create the snapshots on the same Outpost or in its parent Amazon Web Services Region. /// /// Parameters: /// - copyTagsFromSource: Copies the tags from the specified volume to corresponding snapshot. /// - description: A description propagated to every snapshot specified by the instance. /// - dryRun: Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. 
Otherwise, it is UnauthorizedOperation. /// - instanceSpecification: The instance to specify which volumes should be included in the snapshots. - /// - outpostArn: The Amazon Resource Name (ARN) of the Outpost on which to create the local snapshots. To create snapshots from an instance in a Region, omit this parameter. The snapshots are created in the same Region as the instance. To create snapshots from an instance on an Outpost and store the snapshots in the Region, omit this parameter. The snapshots are created in the Region for the Outpost. To create snapshots from an instance on an Outpost and store the snapshots on an Outpost, specify the ARN of the destination Outpost. The snapshots must be created on the same Outpost as the instance. For more information, see Create multi-volume local snapshots from instances on an Outpost in the Amazon EBS User Guide. + /// - location: Only supported for instances in Local Zones. If the source instance is not in a Local Zone, omit this parameter. To create local snapshots in the same Local Zone as the source instance, specify local. To create a regional snapshots in the parent Region of the Local Zone, specify regional or omit this parameter. Default value: regional + /// - outpostArn: Only supported for instances on Outposts. If the source instance is not on an Outpost, omit this parameter. To create the snapshots on the same Outpost as the source instance, specify the ARN of that Outpost. The snapshots must be created on the same Outpost as the instance. To create the snapshots in the parent Region of the Outpost, omit this parameter. For more information, see Create local snapshots from volumes on an Outpost in the Amazon EBS User Guide. /// - tagSpecifications: Tags to apply to every snapshot specified by the instance. /// - logger: Logger use during operation @inlinable @@ -4958,6 +4962,7 @@ public struct EC2: AWSService { description: String? = nil, dryRun: Bool? = nil, instanceSpecification: InstanceSpecification? 
= nil, + location: SnapshotLocationEnum? = nil, outpostArn: String? = nil, tagSpecifications: [TagSpecification]? = nil, logger: Logger = AWSClient.loggingDisabled @@ -4967,6 +4972,7 @@ public struct EC2: AWSService { description: description, dryRun: dryRun, instanceSpecification: instanceSpecification, + location: location, outpostArn: outpostArn, tagSpecifications: tagSpecifications ) @@ -7980,7 +7986,7 @@ public struct EC2: AWSService { /// DependencyViolation. @Sendable @inlinable - public func deleteSecurityGroup(_ input: DeleteSecurityGroupRequest, logger: Logger = AWSClient.loggingDisabled) async throws { + public func deleteSecurityGroup(_ input: DeleteSecurityGroupRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DeleteSecurityGroupResult { try await self.client.execute( operation: "DeleteSecurityGroup", path: "/", @@ -8005,7 +8011,7 @@ public struct EC2: AWSService { groupId: String? = nil, groupName: String? = nil, logger: Logger = AWSClient.loggingDisabled - ) async throws { + ) async throws -> DeleteSecurityGroupResult { let input = DeleteSecurityGroupRequest( dryRun: dryRun, groupId: groupId, @@ -11640,7 +11646,7 @@ public struct EC2: AWSService { return try await self.describeInstanceStatus(input, logger: logger) } - /// Describes a tree-based hierarchy that represents the physical host placement of your EC2 instances within an Availability Zone or Local Zone. You can use this information to determine the relative proximity of your EC2 instances within the Amazon Web Services network to support your tightly coupled workloads. 
Limitations Supported zones Availability Zone Local Zone Supported instance types hpc6a.48xlarge | hpc6id.32xlarge | hpc7a.12xlarge | hpc7a.24xlarge | hpc7a.48xlarge | hpc7a.96xlarge | hpc7g.4xlarge | hpc7g.8xlarge | hpc7g.16xlarge p3dn.24xlarge | p4d.24xlarge | p4de.24xlarge | p5.48xlarge | p5e.48xlarge trn1.2xlarge | trn1.32xlarge | trn1n.32xlarge For more information, see Amazon EC2 instance topology in the Amazon EC2 User Guide. + /// Describes a tree-based hierarchy that represents the physical host placement of your EC2 instances within an Availability Zone or Local Zone. You can use this information to determine the relative proximity of your EC2 instances within the Amazon Web Services network to support your tightly coupled workloads. Limitations Supported zones Availability Zone Local Zone Supported instance types hpc6a.48xlarge | hpc6id.32xlarge | hpc7a.12xlarge | hpc7a.24xlarge | hpc7a.48xlarge | hpc7a.96xlarge | hpc7g.4xlarge | hpc7g.8xlarge | hpc7g.16xlarge p3dn.24xlarge | p4d.24xlarge | p4de.24xlarge | p5.48xlarge | p5e.48xlarge | p5en.48xlarge trn1.2xlarge | trn1.32xlarge | trn1n.32xlarge For more information, see Amazon EC2 instance topology in the Amazon EC2 User Guide. @Sendable @inlinable public func describeInstanceTopology(_ input: DescribeInstanceTopologyRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DescribeInstanceTopologyResult { @@ -11653,7 +11659,7 @@ public struct EC2: AWSService { logger: logger ) } - /// Describes a tree-based hierarchy that represents the physical host placement of your EC2 instances within an Availability Zone or Local Zone. You can use this information to determine the relative proximity of your EC2 instances within the Amazon Web Services network to support your tightly coupled workloads. 
Limitations Supported zones Availability Zone Local Zone Supported instance types hpc6a.48xlarge | hpc6id.32xlarge | hpc7a.12xlarge | hpc7a.24xlarge | hpc7a.48xlarge | hpc7a.96xlarge | hpc7g.4xlarge | hpc7g.8xlarge | hpc7g.16xlarge p3dn.24xlarge | p4d.24xlarge | p4de.24xlarge | p5.48xlarge | p5e.48xlarge trn1.2xlarge | trn1.32xlarge | trn1n.32xlarge For more information, see Amazon EC2 instance topology in the Amazon EC2 User Guide. + /// Describes a tree-based hierarchy that represents the physical host placement of your EC2 instances within an Availability Zone or Local Zone. You can use this information to determine the relative proximity of your EC2 instances within the Amazon Web Services network to support your tightly coupled workloads. Limitations Supported zones Availability Zone Local Zone Supported instance types hpc6a.48xlarge | hpc6id.32xlarge | hpc7a.12xlarge | hpc7a.24xlarge | hpc7a.48xlarge | hpc7a.96xlarge | hpc7g.4xlarge | hpc7g.8xlarge | hpc7g.16xlarge p3dn.24xlarge | p4d.24xlarge | p4de.24xlarge | p5.48xlarge | p5e.48xlarge | p5en.48xlarge trn1.2xlarge | trn1.32xlarge | trn1n.32xlarge For more information, see Amazon EC2 instance topology in the Amazon EC2 User Guide. /// /// Parameters: /// - dryRun: Checks whether you have the required permissions for the operation, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. @@ -11742,7 +11748,7 @@ public struct EC2: AWSService { /// /// Parameters: /// - dryRun: Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. - /// - filters: One or more filters. Filter names and values are case-sensitive. 
auto-recovery-supported - Indicates whether Amazon CloudWatch action based recovery is supported (true | false). bare-metal - Indicates whether it is a bare metal instance type (true | false). burstable-performance-supported - Indicates whether the instance type is a burstable performance T instance type (true | false). current-generation - Indicates whether this instance type is the latest generation instance type of an instance family (true | false). ebs-info.ebs-optimized-info.baseline-bandwidth-in-mbps - The baseline bandwidth performance for an EBS-optimized instance type, in Mbps. ebs-info.ebs-optimized-info.baseline-iops - The baseline input/output storage operations per second for an EBS-optimized instance type. ebs-info.ebs-optimized-info.baseline-throughput-in-mbps - The baseline throughput performance for an EBS-optimized instance type, in MB/s. ebs-info.ebs-optimized-info.maximum-bandwidth-in-mbps - The maximum bandwidth performance for an EBS-optimized instance type, in Mbps. ebs-info.ebs-optimized-info.maximum-iops - The maximum input/output storage operations per second for an EBS-optimized instance type. ebs-info.ebs-optimized-info.maximum-throughput-in-mbps - The maximum throughput performance for an EBS-optimized instance type, in MB/s. ebs-info.ebs-optimized-support - Indicates whether the instance type is EBS-optimized (supported | unsupported | default). ebs-info.encryption-support - Indicates whether EBS encryption is supported (supported | unsupported). ebs-info.nvme-support - Indicates whether non-volatile memory express (NVMe) is supported for EBS volumes (required | supported | unsupported). free-tier-eligible - Indicates whether the instance type is eligible to use in the free tier (true | false). hibernation-supported - Indicates whether On-Demand hibernation is supported (true | false). hypervisor - The hypervisor (nitro | xen). instance-storage-info.disk.count - The number of local disks. 
instance-storage-info.disk.size-in-gb - The storage size of each instance storage disk, in GB. instance-storage-info.disk.type - The storage technology for the local instance storage disks (hdd | ssd). instance-storage-info.encryption-support - Indicates whether data is encrypted at rest (required | supported | unsupported). instance-storage-info.nvme-support - Indicates whether non-volatile memory express (NVMe) is supported for instance store (required | supported | unsupported). instance-storage-info.total-size-in-gb - The total amount of storage available from all local instance storage, in GB. instance-storage-supported - Indicates whether the instance type has local instance storage (true | false). instance-type - The instance type (for example c5.2xlarge or c5*). memory-info.size-in-mib - The memory size. network-info.efa-info.maximum-efa-interfaces - The maximum number of Elastic Fabric Adapters (EFAs) per instance. network-info.efa-supported - Indicates whether the instance type supports Elastic Fabric Adapter (EFA) (true | false). network-info.ena-support - Indicates whether Elastic Network Adapter (ENA) is supported or required (required | supported | unsupported). network-info.encryption-in-transit-supported - Indicates whether the instance type automatically encrypts in-transit traffic between instances (true | false). network-info.ipv4-addresses-per-interface - The maximum number of private IPv4 addresses per network interface. network-info.ipv6-addresses-per-interface - The maximum number of private IPv6 addresses per network interface. network-info.ipv6-supported - Indicates whether the instance type supports IPv6 (true | false). network-info.maximum-network-cards - The maximum number of network cards per instance. network-info.maximum-network-interfaces - The maximum number of network interfaces per instance. network-info.network-performance - The network performance (for example, "25 Gigabit"). 
nitro-enclaves-support - Indicates whether Nitro Enclaves is supported (supported | unsupported). nitro-tpm-support - Indicates whether NitroTPM is supported (supported | unsupported). nitro-tpm-info.supported-versions - The supported NitroTPM version (2.0). processor-info.supported-architecture - The CPU architecture (arm64 | i386 | x86_64). processor-info.sustained-clock-speed-in-ghz - The CPU clock speed, in GHz. processor-info.supported-features - The supported CPU features (amd-sev-snp). supported-boot-mode - The boot mode (legacy-bios | uefi). supported-root-device-type - The root device type (ebs | instance-store). supported-usage-class - The usage class (on-demand | spot | capacity-block). supported-virtualization-type - The virtualization type (hvm | paravirtual). vcpu-info.default-cores - The default number of cores for the instance type. vcpu-info.default-threads-per-core - The default number of threads per core for the instance type. vcpu-info.default-vcpus - The default number of vCPUs for the instance type. vcpu-info.valid-cores - The number of cores that can be configured for the instance type. vcpu-info.valid-threads-per-core - The number of threads per core that can be configured for the instance type. For example, "1" or "1,2". + /// - filters: One or more filters. Filter names and values are case-sensitive. auto-recovery-supported - Indicates whether Amazon CloudWatch action based recovery is supported (true | false). bare-metal - Indicates whether it is a bare metal instance type (true | false). burstable-performance-supported - Indicates whether the instance type is a burstable performance T instance type (true | false). current-generation - Indicates whether this instance type is the latest generation instance type of an instance family (true | false). ebs-info.ebs-optimized-info.baseline-bandwidth-in-mbps - The baseline bandwidth performance for an EBS-optimized instance type, in Mbps. 
ebs-info.ebs-optimized-info.baseline-iops - The baseline input/output storage operations per second for an EBS-optimized instance type. ebs-info.ebs-optimized-info.baseline-throughput-in-mbps - The baseline throughput performance for an EBS-optimized instance type, in MB/s. ebs-info.ebs-optimized-info.maximum-bandwidth-in-mbps - The maximum bandwidth performance for an EBS-optimized instance type, in Mbps. ebs-info.ebs-optimized-info.maximum-iops - The maximum input/output storage operations per second for an EBS-optimized instance type. ebs-info.ebs-optimized-info.maximum-throughput-in-mbps - The maximum throughput performance for an EBS-optimized instance type, in MB/s. ebs-info.ebs-optimized-support - Indicates whether the instance type is EBS-optimized (supported | unsupported | default). ebs-info.encryption-support - Indicates whether EBS encryption is supported (supported | unsupported). ebs-info.nvme-support - Indicates whether non-volatile memory express (NVMe) is supported for EBS volumes (required | supported | unsupported). free-tier-eligible - Indicates whether the instance type is eligible to use in the free tier (true | false). hibernation-supported - Indicates whether On-Demand hibernation is supported (true | false). hypervisor - The hypervisor (nitro | xen). instance-storage-info.disk.count - The number of local disks. instance-storage-info.disk.size-in-gb - The storage size of each instance storage disk, in GB. instance-storage-info.disk.type - The storage technology for the local instance storage disks (hdd | ssd). instance-storage-info.encryption-support - Indicates whether data is encrypted at rest (required | supported | unsupported). instance-storage-info.nvme-support - Indicates whether non-volatile memory express (NVMe) is supported for instance store (required | supported | unsupported). instance-storage-info.total-size-in-gb - The total amount of storage available from all local instance storage, in GB. 
instance-storage-supported - Indicates whether the instance type has local instance storage (true | false). instance-type - The instance type (for example c5.2xlarge or c5*). memory-info.size-in-mib - The memory size. network-info.bandwidth-weightings - For instances that support bandwidth weighting to boost performance (default, vpc-1, ebs-1). network-info.efa-info.maximum-efa-interfaces - The maximum number of Elastic Fabric Adapters (EFAs) per instance. network-info.efa-supported - Indicates whether the instance type supports Elastic Fabric Adapter (EFA) (true | false). network-info.ena-support - Indicates whether Elastic Network Adapter (ENA) is supported or required (required | supported | unsupported). network-info.encryption-in-transit-supported - Indicates whether the instance type automatically encrypts in-transit traffic between instances (true | false). network-info.ipv4-addresses-per-interface - The maximum number of private IPv4 addresses per network interface. network-info.ipv6-addresses-per-interface - The maximum number of private IPv6 addresses per network interface. network-info.ipv6-supported - Indicates whether the instance type supports IPv6 (true | false). network-info.maximum-network-cards - The maximum number of network cards per instance. network-info.maximum-network-interfaces - The maximum number of network interfaces per instance. network-info.network-performance - The network performance (for example, "25 Gigabit"). nitro-enclaves-support - Indicates whether Nitro Enclaves is supported (supported | unsupported). nitro-tpm-support - Indicates whether NitroTPM is supported (supported | unsupported). nitro-tpm-info.supported-versions - The supported NitroTPM version (2.0). processor-info.supported-architecture - The CPU architecture (arm64 | i386 | x86_64). processor-info.sustained-clock-speed-in-ghz - The CPU clock speed, in GHz. processor-info.supported-features - The supported CPU features (amd-sev-snp). 
supported-boot-mode - The boot mode (legacy-bios | uefi). supported-root-device-type - The root device type (ebs | instance-store). supported-usage-class - The usage class (on-demand | spot | capacity-block). supported-virtualization-type - The virtualization type (hvm | paravirtual). vcpu-info.default-cores - The default number of cores for the instance type. vcpu-info.default-threads-per-core - The default number of threads per core for the instance type. vcpu-info.default-vcpus - The default number of vCPUs for the instance type. vcpu-info.valid-cores - The number of cores that can be configured for the instance type. vcpu-info.valid-threads-per-core - The number of threads per core that can be configured for the instance type. For example, "1" or "1,2". /// - instanceTypes: The instance types. /// - maxResults: The maximum number of items to return for this request. To get the next page of items, make another request with the token returned in the output. /// - nextToken: The token returned from a previous paginated request. Pagination continues from the end of the items returned by the previous request. @@ -11783,7 +11789,7 @@ public struct EC2: AWSService { /// /// Parameters: /// - dryRun: Checks whether you have the required permissions for the operation, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. - /// - filters: The filters. affinity - The affinity setting for an instance running on a Dedicated Host (default | host). architecture - The instance architecture (i386 | x86_64 | arm64). availability-zone - The Availability Zone of the instance. block-device-mapping.attach-time - The attach time for an EBS volume mapped to the instance, for example, 2022-09-15T17:15:20.000Z. block-device-mapping.delete-on-termination - A Boolean that indicates whether the EBS volume is deleted on instance termination. 
block-device-mapping.device-name - The device name specified in the block device mapping (for example, /dev/sdh or xvdh). block-device-mapping.status - The status for the EBS volume (attaching | attached | detaching | detached). block-device-mapping.volume-id - The volume ID of the EBS volume. boot-mode - The boot mode that was specified by the AMI (legacy-bios | uefi | uefi-preferred). capacity-reservation-id - The ID of the Capacity Reservation into which the instance was launched. capacity-reservation-specification.capacity-reservation-preference - The instance's Capacity Reservation preference (open | none). capacity-reservation-specification.capacity-reservation-target.capacity-reservation-id - The ID of the targeted Capacity Reservation. capacity-reservation-specification.capacity-reservation-target.capacity-reservation-resource-group-arn - The ARN of the targeted Capacity Reservation group. client-token - The idempotency token you provided when you launched the instance. current-instance-boot-mode - The boot mode that is used to launch the instance at launch or start (legacy-bios | uefi). dns-name - The public DNS name of the instance. ebs-optimized - A Boolean that indicates whether the instance is optimized for Amazon EBS I/O. ena-support - A Boolean that indicates whether the instance is enabled for enhanced networking with ENA. enclave-options.enabled - A Boolean that indicates whether the instance is enabled for Amazon Web Services Nitro Enclaves. hibernation-options.configured - A Boolean that indicates whether the instance is enabled for hibernation. A value of true means that the instance is enabled for hibernation. host-id - The ID of the Dedicated Host on which the instance is running, if applicable. hypervisor - The hypervisor type of the instance (ovm | xen). The value xen is used for both Xen and Nitro hypervisors. iam-instance-profile.arn - The instance profile associated with the instance. Specified as an ARN. 
iam-instance-profile.id - The instance profile associated with the instance. Specified as an ID. image-id - The ID of the image used to launch the instance. instance-id - The ID of the instance. instance-lifecycle - Indicates whether this is a Spot Instance, a Scheduled Instance, or a Capacity Block (spot | scheduled | capacity-block). instance-state-code - The state of the instance, as a 16-bit unsigned integer. The high byte is used for internal purposes and should be ignored. The low byte is set based on the state represented. The valid values are: 0 (pending), 16 (running), 32 (shutting-down), 48 (terminated), 64 (stopping), and 80 (stopped). instance-state-name - The state of the instance (pending | running | shutting-down | terminated | stopping | stopped). instance-type - The type of instance (for example, t2.micro). instance.group-id - The ID of the security group for the instance. instance.group-name - The name of the security group for the instance. ip-address - The public IPv4 address of the instance. ipv6-address - The IPv6 address of the instance. kernel-id - The kernel ID. key-name - The name of the key pair used when the instance was launched. launch-index - When launching multiple instances, this is the index for the instance in the launch group (for example, 0, 1, 2, and so on). launch-time - The time when the instance was launched, in the ISO 8601 format in the UTC time zone (YYYY-MM-DDThh:mm:ss.sssZ), for example, 2021-09-29T11:04:43.305Z. You can use a wildcard (*), for example, 2021-09-29T*, which matches an entire day. maintenance-options.auto-recovery - The current automatic recovery behavior of the instance (disabled | default). metadata-options.http-endpoint - The status of access to the HTTP metadata endpoint on your instance (enabled | disabled) metadata-options.http-protocol-ipv4 - Indicates whether the IPv4 endpoint is enabled (disabled | enabled). 
metadata-options.http-protocol-ipv6 - Indicates whether the IPv6 endpoint is enabled (disabled | enabled). metadata-options.http-put-response-hop-limit - The HTTP metadata request put response hop limit (integer, possible values 1 to 64) metadata-options.http-tokens - The metadata request authorization state (optional | required) metadata-options.instance-metadata-tags - The status of access to instance tags from the instance metadata (enabled | disabled) metadata-options.state - The state of the metadata option changes (pending | applied). monitoring-state - Indicates whether detailed monitoring is enabled (disabled | enabled). network-interface.addresses.association.allocation-id - The allocation ID. network-interface.addresses.association.association-id - The association ID. network-interface.addresses.association.carrier-ip - The carrier IP address. network-interface.addresses.association.customer-owned-ip - The customer-owned IP address. network-interface.addresses.association.ip-owner-id - The owner ID of the private IPv4 address associated with the network interface. network-interface.addresses.association.public-dns-name - The public DNS name. network-interface.addresses.association.public-ip - The ID of the association of an Elastic IP address (IPv4) with a network interface. network-interface.addresses.primary - Specifies whether the IPv4 address of the network interface is the primary private IPv4 address. network-interface.addresses.private-dns-name - The private DNS name. network-interface.addresses.private-ip-address - The private IPv4 address associated with the network interface. network-interface.association.allocation-id - The allocation ID returned when you allocated the Elastic IP address (IPv4) for your network interface. network-interface.association.association-id - The association ID returned when the network interface was associated with an IPv4 address. network-interface.association.carrier-ip - The customer-owned IP address. 
network-interface.association.customer-owned-ip - The customer-owned IP address. network-interface.association.ip-owner-id - The owner of the Elastic IP address (IPv4) associated with the network interface. network-interface.association.public-dns-name - The public DNS name. network-interface.association.public-ip - The address of the Elastic IP address (IPv4) bound to the network interface. network-interface.attachment.attach-time - The time that the network interface was attached to an instance. network-interface.attachment.attachment-id - The ID of the interface attachment. network-interface.attachment.delete-on-termination - Specifies whether the attachment is deleted when an instance is terminated. network-interface.attachment.device-index - The device index to which the network interface is attached. network-interface.attachment.instance-id - The ID of the instance to which the network interface is attached. network-interface.attachment.instance-owner-id - The owner ID of the instance to which the network interface is attached. network-interface.attachment.network-card-index - The index of the network card. network-interface.attachment.status - The status of the attachment (attaching | attached | detaching | detached). network-interface.availability-zone - The Availability Zone for the network interface. network-interface.deny-all-igw-traffic - A Boolean that indicates whether a network interface with an IPv6 address is unreachable from the public internet. network-interface.description - The description of the network interface. network-interface.group-id - The ID of a security group associated with the network interface. network-interface.group-name - The name of a security group associated with the network interface. network-interface.ipv4-prefixes.ipv4-prefix - The IPv4 prefixes that are assigned to the network interface. network-interface.ipv6-address - The IPv6 address associated with the network interface. 
network-interface.ipv6-addresses.ipv6-address - The IPv6 address associated with the network interface. network-interface.ipv6-addresses.is-primary-ipv6 - A Boolean that indicates whether this is the primary IPv6 address. network-interface.ipv6-native - A Boolean that indicates whether this is an IPv6 only network interface. network-interface.ipv6-prefixes.ipv6-prefix - The IPv6 prefix assigned to the network interface. network-interface.mac-address - The MAC address of the network interface. network-interface.network-interface-id - The ID of the network interface. network-interface.operator.managed - A Boolean that indicates whether the instance has a managed network interface. network-interface.operator.principal - The principal that manages the network interface. Only valid for instances with managed network interfaces, where managed is true. network-interface.outpost-arn - The ARN of the Outpost. network-interface.owner-id - The ID of the owner of the network interface. network-interface.private-dns-name - The private DNS name of the network interface. network-interface.private-ip-address - The private IPv4 address. network-interface.public-dns-name - The public DNS name. network-interface.requester-id - The requester ID for the network interface. network-interface.requester-managed - Indicates whether the network interface is being managed by Amazon Web Services. network-interface.status - The status of the network interface (available) | in-use). network-interface.source-dest-check - Whether the network interface performs source/destination checking. A value of true means that checking is enabled, and false means that checking is disabled. The value must be false for the network interface to perform network address translation (NAT) in your VPC. network-interface.subnet-id - The ID of the subnet for the network interface. network-interface.tag-key - The key of a tag assigned to the network interface. 
network-interface.tag-value - The value of a tag assigned to the network interface. network-interface.vpc-id - The ID of the VPC for the network interface. operator.managed - A Boolean that indicates whether this is a managed instance. operator.principal - The principal that manages the instance. Only valid for managed instances, where managed is true. outpost-arn - The Amazon Resource Name (ARN) of the Outpost. owner-id - The Amazon Web Services account ID of the instance owner. placement-group-name - The name of the placement group for the instance. placement-partition-number - The partition in which the instance is located. platform - The platform. To list only Windows instances, use windows. platform-details - The platform (Linux/UNIX | Red Hat BYOL Linux | Red Hat Enterprise Linux | Red Hat Enterprise Linux with HA | Red Hat Enterprise Linux with SQL Server Standard and HA | Red Hat Enterprise Linux with SQL Server Enterprise and HA | Red Hat Enterprise Linux with SQL Server Standard | Red Hat Enterprise Linux with SQL Server Web | Red Hat Enterprise Linux with SQL Server Enterprise | SQL Server Enterprise | SQL Server Standard | SQL Server Web | SUSE Linux | Ubuntu Pro | Windows | Windows BYOL | Windows with SQL Server Enterprise | Windows with SQL Server Standard | Windows with SQL Server Web). private-dns-name - The private IPv4 DNS name of the instance. private-dns-name-options.enable-resource-name-dns-a-record - A Boolean that indicates whether to respond to DNS queries for instance hostnames with DNS A records. private-dns-name-options.enable-resource-name-dns-aaaa-record - A Boolean that indicates whether to respond to DNS queries for instance hostnames with DNS AAAA records. private-dns-name-options.hostname-type - The type of hostname (ip-name | resource-name). private-ip-address - The private IPv4 address of the instance. This can only be used to filter by the primary IP address of the network interface attached to the instance. 
To filter by additional IP addresses assigned to the network interface, use the filter network-interface.addresses.private-ip-address. product-code - The product code associated with the AMI used to launch the instance. product-code.type - The type of product code (devpay | marketplace). ramdisk-id - The RAM disk ID. reason - The reason for the current state of the instance (for example, shows "User Initiated [date]" when you stop or terminate the instance). Similar to the state-reason-code filter. requester-id - The ID of the entity that launched the instance on your behalf (for example, Amazon Web Services Management Console, Auto Scaling, and so on). reservation-id - The ID of the instance's reservation. A reservation ID is created any time you launch an instance. A reservation ID has a one-to-one relationship with an instance launch request, but can be associated with more than one instance if you launch multiple instances using the same launch request. For example, if you launch one instance, you get one reservation ID. If you launch ten instances using the same launch request, you also get one reservation ID. root-device-name - The device name of the root device volume (for example, /dev/sda1). root-device-type - The type of the root device volume (ebs | instance-store). source-dest-check - Indicates whether the instance performs source/destination checking. A value of true means that checking is enabled, and false means that checking is disabled. The value must be false for the instance to perform network address translation (NAT) in your VPC. spot-instance-request-id - The ID of the Spot Instance request. state-reason-code - The reason code for the state change. state-reason-message - A message that describes the state change. subnet-id - The ID of the subnet for the instance. tag: - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. 
For example, to find all resources that have a tag with the key Owner and the value TeamA, specify tag:Owner for the filter name and TeamA for the filter value. tag-key - The key of a tag assigned to the resource. Use this filter to find all resources that have a tag with a specific key, regardless of the tag value. tenancy - The tenancy of an instance (dedicated | default | host). tpm-support - Indicates if the instance is configured for NitroTPM support (v2.0). usage-operation - The usage operation value for the instance (RunInstances | RunInstances:00g0 | RunInstances:0010 | RunInstances:1010 | RunInstances:1014 | RunInstances:1110 | RunInstances:0014 | RunInstances:0210 | RunInstances:0110 | RunInstances:0100 | RunInstances:0004 | RunInstances:0200 | RunInstances:000g | RunInstances:0g00 | RunInstances:0002 | RunInstances:0800 | RunInstances:0102 | RunInstances:0006 | RunInstances:0202). usage-operation-update-time - The time that the usage operation was last updated, for example, 2022-09-15T17:15:20.000Z. virtualization-type - The virtualization type of the instance (paravirtual | hvm). vpc-id - The ID of the VPC that the instance is running in. + /// - filters: The filters. affinity - The affinity setting for an instance running on a Dedicated Host (default | host). architecture - The instance architecture (i386 | x86_64 | arm64). availability-zone - The Availability Zone of the instance. block-device-mapping.attach-time - The attach time for an EBS volume mapped to the instance, for example, 2022-09-15T17:15:20.000Z. block-device-mapping.delete-on-termination - A Boolean that indicates whether the EBS volume is deleted on instance termination. block-device-mapping.device-name - The device name specified in the block device mapping (for example, /dev/sdh or xvdh). block-device-mapping.status - The status for the EBS volume (attaching | attached | detaching | detached). block-device-mapping.volume-id - The volume ID of the EBS volume. 
boot-mode - The boot mode that was specified by the AMI (legacy-bios | uefi | uefi-preferred). capacity-reservation-id - The ID of the Capacity Reservation into which the instance was launched. capacity-reservation-specification.capacity-reservation-preference - The instance's Capacity Reservation preference (open | none). capacity-reservation-specification.capacity-reservation-target.capacity-reservation-id - The ID of the targeted Capacity Reservation. capacity-reservation-specification.capacity-reservation-target.capacity-reservation-resource-group-arn - The ARN of the targeted Capacity Reservation group. client-token - The idempotency token you provided when you launched the instance. current-instance-boot-mode - The boot mode that is used to launch the instance at launch or start (legacy-bios | uefi). dns-name - The public DNS name of the instance. ebs-optimized - A Boolean that indicates whether the instance is optimized for Amazon EBS I/O. ena-support - A Boolean that indicates whether the instance is enabled for enhanced networking with ENA. enclave-options.enabled - A Boolean that indicates whether the instance is enabled for Amazon Web Services Nitro Enclaves. hibernation-options.configured - A Boolean that indicates whether the instance is enabled for hibernation. A value of true means that the instance is enabled for hibernation. host-id - The ID of the Dedicated Host on which the instance is running, if applicable. hypervisor - The hypervisor type of the instance (ovm | xen). The value xen is used for both Xen and Nitro hypervisors. iam-instance-profile.arn - The instance profile associated with the instance. Specified as an ARN. iam-instance-profile.id - The instance profile associated with the instance. Specified as an ID. image-id - The ID of the image used to launch the instance. instance-id - The ID of the instance. 
instance-lifecycle - Indicates whether this is a Spot Instance, a Scheduled Instance, or a Capacity Block (spot | scheduled | capacity-block). instance-state-code - The state of the instance, as a 16-bit unsigned integer. The high byte is used for internal purposes and should be ignored. The low byte is set based on the state represented. The valid values are: 0 (pending), 16 (running), 32 (shutting-down), 48 (terminated), 64 (stopping), and 80 (stopped). instance-state-name - The state of the instance (pending | running | shutting-down | terminated | stopping | stopped). instance-type - The type of instance (for example, t2.micro). instance.group-id - The ID of the security group for the instance. instance.group-name - The name of the security group for the instance. ip-address - The public IPv4 address of the instance. ipv6-address - The IPv6 address of the instance. kernel-id - The kernel ID. key-name - The name of the key pair used when the instance was launched. launch-index - When launching multiple instances, this is the index for the instance in the launch group (for example, 0, 1, 2, and so on). launch-time - The time when the instance was launched, in the ISO 8601 format in the UTC time zone (YYYY-MM-DDThh:mm:ss.sssZ), for example, 2021-09-29T11:04:43.305Z. You can use a wildcard (*), for example, 2021-09-29T*, which matches an entire day. maintenance-options.auto-recovery - The current automatic recovery behavior of the instance (disabled | default). metadata-options.http-endpoint - The status of access to the HTTP metadata endpoint on your instance (enabled | disabled) metadata-options.http-protocol-ipv4 - Indicates whether the IPv4 endpoint is enabled (disabled | enabled). metadata-options.http-protocol-ipv6 - Indicates whether the IPv6 endpoint is enabled (disabled | enabled). 
metadata-options.http-put-response-hop-limit - The HTTP metadata request put response hop limit (integer, possible values 1 to 64) metadata-options.http-tokens - The metadata request authorization state (optional | required) metadata-options.instance-metadata-tags - The status of access to instance tags from the instance metadata (enabled | disabled) metadata-options.state - The state of the metadata option changes (pending | applied). monitoring-state - Indicates whether detailed monitoring is enabled (disabled | enabled). network-interface.addresses.association.allocation-id - The allocation ID. network-interface.addresses.association.association-id - The association ID. network-interface.addresses.association.carrier-ip - The carrier IP address. network-interface.addresses.association.customer-owned-ip - The customer-owned IP address. network-interface.addresses.association.ip-owner-id - The owner ID of the private IPv4 address associated with the network interface. network-interface.addresses.association.public-dns-name - The public DNS name. network-interface.addresses.association.public-ip - The ID of the association of an Elastic IP address (IPv4) with a network interface. network-interface.addresses.primary - Specifies whether the IPv4 address of the network interface is the primary private IPv4 address. network-interface.addresses.private-dns-name - The private DNS name. network-interface.addresses.private-ip-address - The private IPv4 address associated with the network interface. network-interface.association.allocation-id - The allocation ID returned when you allocated the Elastic IP address (IPv4) for your network interface. network-interface.association.association-id - The association ID returned when the network interface was associated with an IPv4 address. network-interface.association.carrier-ip - The customer-owned IP address. network-interface.association.customer-owned-ip - The customer-owned IP address. 
network-interface.association.ip-owner-id - The owner of the Elastic IP address (IPv4) associated with the network interface. network-interface.association.public-dns-name - The public DNS name. network-interface.association.public-ip - The address of the Elastic IP address (IPv4) bound to the network interface. network-interface.attachment.attach-time - The time that the network interface was attached to an instance. network-interface.attachment.attachment-id - The ID of the interface attachment. network-interface.attachment.delete-on-termination - Specifies whether the attachment is deleted when an instance is terminated. network-interface.attachment.device-index - The device index to which the network interface is attached. network-interface.attachment.instance-id - The ID of the instance to which the network interface is attached. network-interface.attachment.instance-owner-id - The owner ID of the instance to which the network interface is attached. network-interface.attachment.network-card-index - The index of the network card. network-interface.attachment.status - The status of the attachment (attaching | attached | detaching | detached). network-interface.availability-zone - The Availability Zone for the network interface. network-interface.deny-all-igw-traffic - A Boolean that indicates whether a network interface with an IPv6 address is unreachable from the public internet. network-interface.description - The description of the network interface. network-interface.group-id - The ID of a security group associated with the network interface. network-interface.group-name - The name of a security group associated with the network interface. network-interface.ipv4-prefixes.ipv4-prefix - The IPv4 prefixes that are assigned to the network interface. network-interface.ipv6-address - The IPv6 address associated with the network interface. network-interface.ipv6-addresses.ipv6-address - The IPv6 address associated with the network interface. 
network-interface.ipv6-addresses.is-primary-ipv6 - A Boolean that indicates whether this is the primary IPv6 address. network-interface.ipv6-native - A Boolean that indicates whether this is an IPv6 only network interface. network-interface.ipv6-prefixes.ipv6-prefix - The IPv6 prefix assigned to the network interface. network-interface.mac-address - The MAC address of the network interface. network-interface.network-interface-id - The ID of the network interface. network-interface.operator.managed - A Boolean that indicates whether the instance has a managed network interface. network-interface.operator.principal - The principal that manages the network interface. Only valid for instances with managed network interfaces, where managed is true. network-interface.outpost-arn - The ARN of the Outpost. network-interface.owner-id - The ID of the owner of the network interface. network-interface.private-dns-name - The private DNS name of the network interface. network-interface.private-ip-address - The private IPv4 address. network-interface.public-dns-name - The public DNS name. network-interface.requester-id - The requester ID for the network interface. network-interface.requester-managed - Indicates whether the network interface is being managed by Amazon Web Services. network-interface.status - The status of the network interface (available | in-use). network-interface.source-dest-check - Whether the network interface performs source/destination checking. A value of true means that checking is enabled, and false means that checking is disabled. The value must be false for the network interface to perform network address translation (NAT) in your VPC. network-interface.subnet-id - The ID of the subnet for the network interface. network-interface.tag-key - The key of a tag assigned to the network interface. network-interface.tag-value - The value of a tag assigned to the network interface. network-interface.vpc-id - The ID of the VPC for the network interface. 
network-performance-options.bandwidth-weighting - Where the performance boost is applied, if applicable. Valid values: default, vpc-1, ebs-1. operator.managed - A Boolean that indicates whether this is a managed instance. operator.principal - The principal that manages the instance. Only valid for managed instances, where managed is true. outpost-arn - The Amazon Resource Name (ARN) of the Outpost. owner-id - The Amazon Web Services account ID of the instance owner. placement-group-name - The name of the placement group for the instance. placement-partition-number - The partition in which the instance is located. platform - The platform. To list only Windows instances, use windows. platform-details - The platform (Linux/UNIX | Red Hat BYOL Linux | Red Hat Enterprise Linux | Red Hat Enterprise Linux with HA | Red Hat Enterprise Linux with SQL Server Standard and HA | Red Hat Enterprise Linux with SQL Server Enterprise and HA | Red Hat Enterprise Linux with SQL Server Standard | Red Hat Enterprise Linux with SQL Server Web | Red Hat Enterprise Linux with SQL Server Enterprise | SQL Server Enterprise | SQL Server Standard | SQL Server Web | SUSE Linux | Ubuntu Pro | Windows | Windows BYOL | Windows with SQL Server Enterprise | Windows with SQL Server Standard | Windows with SQL Server Web). private-dns-name - The private IPv4 DNS name of the instance. private-dns-name-options.enable-resource-name-dns-a-record - A Boolean that indicates whether to respond to DNS queries for instance hostnames with DNS A records. private-dns-name-options.enable-resource-name-dns-aaaa-record - A Boolean that indicates whether to respond to DNS queries for instance hostnames with DNS AAAA records. private-dns-name-options.hostname-type - The type of hostname (ip-name | resource-name). private-ip-address - The private IPv4 address of the instance. This can only be used to filter by the primary IP address of the network interface attached to the instance. 
To filter by additional IP addresses assigned to the network interface, use the filter network-interface.addresses.private-ip-address. product-code - The product code associated with the AMI used to launch the instance. product-code.type - The type of product code (devpay | marketplace). ramdisk-id - The RAM disk ID. reason - The reason for the current state of the instance (for example, shows "User Initiated [date]" when you stop or terminate the instance). Similar to the state-reason-code filter. requester-id - The ID of the entity that launched the instance on your behalf (for example, Amazon Web Services Management Console, Auto Scaling, and so on). reservation-id - The ID of the instance's reservation. A reservation ID is created any time you launch an instance. A reservation ID has a one-to-one relationship with an instance launch request, but can be associated with more than one instance if you launch multiple instances using the same launch request. For example, if you launch one instance, you get one reservation ID. If you launch ten instances using the same launch request, you also get one reservation ID. root-device-name - The device name of the root device volume (for example, /dev/sda1). root-device-type - The type of the root device volume (ebs | instance-store). source-dest-check - Indicates whether the instance performs source/destination checking. A value of true means that checking is enabled, and false means that checking is disabled. The value must be false for the instance to perform network address translation (NAT) in your VPC. spot-instance-request-id - The ID of the Spot Instance request. state-reason-code - The reason code for the state change. state-reason-message - A message that describes the state change. subnet-id - The ID of the subnet for the instance. tag: - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. 
For example, to find all resources that have a tag with the key Owner and the value TeamA, specify tag:Owner for the filter name and TeamA for the filter value. tag-key - The key of a tag assigned to the resource. Use this filter to find all resources that have a tag with a specific key, regardless of the tag value. tenancy - The tenancy of an instance (dedicated | default | host). tpm-support - Indicates if the instance is configured for NitroTPM support (v2.0). usage-operation - The usage operation value for the instance (RunInstances | RunInstances:00g0 | RunInstances:0010 | RunInstances:1010 | RunInstances:1014 | RunInstances:1110 | RunInstances:0014 | RunInstances:0210 | RunInstances:0110 | RunInstances:0100 | RunInstances:0004 | RunInstances:0200 | RunInstances:000g | RunInstances:0g00 | RunInstances:0002 | RunInstances:0800 | RunInstances:0102 | RunInstances:0006 | RunInstances:0202). usage-operation-update-time - The time that the usage operation was last updated, for example, 2022-09-15T17:15:20.000Z. virtualization-type - The virtualization type of the instance (paravirtual | hvm). vpc-id - The ID of the VPC that the instance is running in. /// - instanceIds: The instance IDs. Default: Describes all your instances. /// - maxResults: The maximum number of items to return for this request. To get the next page of items, make another request with the token returned in the output. /// - nextToken: The token returned from a previous paginated request. Pagination continues from the end of the items returned by the previous request. @@ -21547,6 +21553,41 @@ public struct EC2: AWSService { return try await self.modifyInstanceMetadataOptions(input, logger: logger) } + /// Change the configuration of the network performance options for an existing instance. 
+ @Sendable + @inlinable + public func modifyInstanceNetworkPerformanceOptions(_ input: ModifyInstanceNetworkPerformanceRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ModifyInstanceNetworkPerformanceResult { + try await self.client.execute( + operation: "ModifyInstanceNetworkPerformanceOptions", + path: "/", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Change the configuration of the network performance options for an existing instance. + /// + /// Parameters: + /// - bandwidthWeighting: Specify the bandwidth weighting option to boost the associated type of baseline bandwidth, as follows: default This option uses the standard bandwidth configuration for your instance type. vpc-1 This option boosts your networking baseline bandwidth and reduces your EBS baseline bandwidth. ebs-1 This option boosts your EBS baseline bandwidth and reduces your networking baseline bandwidth. + /// - dryRun: Checks whether you have the required permissions for the operation, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. + /// - instanceId: The ID of the instance to update. + /// - logger: Logger use during operation + @inlinable + public func modifyInstanceNetworkPerformanceOptions( + bandwidthWeighting: InstanceBandwidthWeighting? = nil, + dryRun: Bool? = nil, + instanceId: String? = nil, + logger: Logger = AWSClient.loggingDisabled + ) async throws -> ModifyInstanceNetworkPerformanceResult { + let input = ModifyInstanceNetworkPerformanceRequest( + bandwidthWeighting: bandwidthWeighting, + dryRun: dryRun, + instanceId: instanceId + ) + return try await self.modifyInstanceNetworkPerformanceOptions(input, logger: logger) + } + /// Modifies the placement attributes for a specified instance. You can do the following: Modify the affinity between an instance and a Dedicated Host. 
When affinity is set to host and the instance is not associated with a specific Dedicated Host, the next time the instance is started, it is automatically associated with the host on which it lands. If the instance is restarted or rebooted, this relationship persists. Change the Dedicated Host with which an instance is associated. Change the instance tenancy of an instance. Move an instance to or from a placement group. At least one attribute for affinity, host ID, tenancy, or placement group name must be specified in the request. Affinity and tenancy can be modified in the same request. To modify the host ID, tenancy, placement group, or partition for an instance, the instance must be in the stopped state. @Sendable @inlinable @@ -25751,6 +25792,7 @@ public struct EC2: AWSService { /// - minCount: The minimum number of instances to launch. If you specify a value that is more capacity than Amazon EC2 can provide in the target Availability Zone, Amazon EC2 does not launch any instances. Constraints: Between 1 and the quota for the specified instance type for your account for this Region. For more information, see Amazon EC2 instance type quotas. /// - monitoring: Specifies whether detailed monitoring is enabled for the instance. /// - networkInterfaces: The network interfaces to associate with the instance. + /// - networkPerformanceOptions: Contains settings for the network performance options for the instance. /// - operator: Reserved for internal use. /// - placement: The placement for the instance. /// - privateDnsNameOptions: The options for the instance hostname. The default values are inherited from the subnet. Applies only if creating a network interface, not attaching an existing one. @@ -25796,6 +25838,7 @@ public struct EC2: AWSService { minCount: Int? = nil, monitoring: RunInstancesMonitoringEnabled? = nil, networkInterfaces: [InstanceNetworkInterfaceSpecification]? = nil, + networkPerformanceOptions: InstanceNetworkPerformanceOptionsRequest? 
= nil, operator: OperatorRequest? = nil, placement: Placement? = nil, privateDnsNameOptions: PrivateDnsNameOptionsRequest? = nil, @@ -25841,6 +25884,7 @@ public struct EC2: AWSService { minCount: minCount, monitoring: monitoring, networkInterfaces: networkInterfaces, + networkPerformanceOptions: networkPerformanceOptions, operator: `operator`, placement: placement, privateDnsNameOptions: privateDnsNameOptions, @@ -26048,7 +26092,7 @@ public struct EC2: AWSService { return try await self.sendDiagnosticInterrupt(input, logger: logger) } - /// Generates an account status report. The report is generated asynchronously, and can take several hours to complete. The report provides the current status of all attributes supported by declarative policies for the accounts within the specified scope. The scope is determined by the specified TargetId, which can represent an individual account, or all the accounts that fall under the specified organizational unit (OU) or root (the entire Amazon Web Services Organization). The report is saved to your specified S3 bucket, using the following path structure (with the italicized placeholders representing your specific values): s3://amzn-s3-demo-bucket/your-optional-s3-prefix/ec2_targetId_reportId_yyyyMMddThhmmZ.csv Prerequisites for generating a report The StartDeclarativePoliciesReport API can only be called by the management account or delegated administrators for the organization. An S3 bucket must be available before generating the report (you can create a new one or use an existing one), and it must have an appropriate bucket policy. For a sample S3 policy, see Sample Amazon S3 policy under . Trusted access must be enabled for the service for which the declarative policy will enforce a baseline configuration. If you use the Amazon Web Services Organizations console, this is done automatically when you enable declarative policies. The API uses the following service principal to identify the EC2 service: ec2.amazonaws.com. 
For more information on how to enable trusted access with the Amazon Web Services CLI and Amazon Web Services SDKs, see Using Organizations with other Amazon Web Services services in the Amazon Web Services Organizations User Guide. Only one report per organization can be generated at a time. Attempting to generate a report while another is in progress will result in an error. For more information, including the required IAM permissions to run this API, see Generating the account status report for declarative policies in the Amazon Web Services Organizations User Guide. + /// Generates an account status report. The report is generated asynchronously, and can take several hours to complete. The report provides the current status of all attributes supported by declarative policies for the accounts within the specified scope. The scope is determined by the specified TargetId, which can represent an individual account, or all the accounts that fall under the specified organizational unit (OU) or root (the entire Amazon Web Services Organization). The report is saved to your specified S3 bucket, using the following path structure (with the italicized placeholders representing your specific values): s3://amzn-s3-demo-bucket/your-optional-s3-prefix/ec2_targetId_reportId_yyyyMMddThhmmZ.csv Prerequisites for generating a report The StartDeclarativePoliciesReport API can only be called by the management account or delegated administrators for the organization. An S3 bucket must be available before generating the report (you can create a new one or use an existing one), it must be in the same Region where the report generation request is made, and it must have an appropriate bucket policy. For a sample S3 policy, see Sample Amazon S3 policy under . Trusted access must be enabled for the service for which the declarative policy will enforce a baseline configuration. 
If you use the Amazon Web Services Organizations console, this is done automatically when you enable declarative policies. The API uses the following service principal to identify the EC2 service: ec2.amazonaws.com. For more information on how to enable trusted access with the Amazon Web Services CLI and Amazon Web Services SDKs, see Using Organizations with other Amazon Web Services services in the Amazon Web Services Organizations User Guide. Only one report per organization can be generated at a time. Attempting to generate a report while another is in progress will result in an error. For more information, including the required IAM permissions to run this API, see Generating the account status report for declarative policies in the Amazon Web Services Organizations User Guide. @Sendable @inlinable public func startDeclarativePoliciesReport(_ input: StartDeclarativePoliciesReportRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> StartDeclarativePoliciesReportResult { @@ -26061,11 +26105,11 @@ public struct EC2: AWSService { logger: logger ) } - /// Generates an account status report. The report is generated asynchronously, and can take several hours to complete. The report provides the current status of all attributes supported by declarative policies for the accounts within the specified scope. The scope is determined by the specified TargetId, which can represent an individual account, or all the accounts that fall under the specified organizational unit (OU) or root (the entire Amazon Web Services Organization). The report is saved to your specified S3 bucket, using the following path structure (with the italicized placeholders representing your specific values): s3://amzn-s3-demo-bucket/your-optional-s3-prefix/ec2_targetId_reportId_yyyyMMddThhmmZ.csv Prerequisites for generating a report The StartDeclarativePoliciesReport API can only be called by the management account or delegated administrators for the organization. 
An S3 bucket must be available before generating the report (you can create a new one or use an existing one), and it must have an appropriate bucket policy. For a sample S3 policy, see Sample Amazon S3 policy under . Trusted access must be enabled for the service for which the declarative policy will enforce a baseline configuration. If you use the Amazon Web Services Organizations console, this is done automatically when you enable declarative policies. The API uses the following service principal to identify the EC2 service: ec2.amazonaws.com. For more information on how to enable trusted access with the Amazon Web Services CLI and Amazon Web Services SDKs, see Using Organizations with other Amazon Web Services services in the Amazon Web Services Organizations User Guide. Only one report per organization can be generated at a time. Attempting to generate a report while another is in progress will result in an error. For more information, including the required IAM permissions to run this API, see Generating the account status report for declarative policies in the Amazon Web Services Organizations User Guide. + /// Generates an account status report. The report is generated asynchronously, and can take several hours to complete. The report provides the current status of all attributes supported by declarative policies for the accounts within the specified scope. The scope is determined by the specified TargetId, which can represent an individual account, or all the accounts that fall under the specified organizational unit (OU) or root (the entire Amazon Web Services Organization). 
The report is saved to your specified S3 bucket, using the following path structure (with the italicized placeholders representing your specific values): s3://amzn-s3-demo-bucket/your-optional-s3-prefix/ec2_targetId_reportId_yyyyMMddThhmmZ.csv Prerequisites for generating a report The StartDeclarativePoliciesReport API can only be called by the management account or delegated administrators for the organization. An S3 bucket must be available before generating the report (you can create a new one or use an existing one), it must be in the same Region where the report generation request is made, and it must have an appropriate bucket policy. For a sample S3 policy, see Sample Amazon S3 policy under . Trusted access must be enabled for the service for which the declarative policy will enforce a baseline configuration. If you use the Amazon Web Services Organizations console, this is done automatically when you enable declarative policies. The API uses the following service principal to identify the EC2 service: ec2.amazonaws.com. For more information on how to enable trusted access with the Amazon Web Services CLI and Amazon Web Services SDKs, see Using Organizations with other Amazon Web Services services in the Amazon Web Services Organizations User Guide. Only one report per organization can be generated at a time. Attempting to generate a report while another is in progress will result in an error. For more information, including the required IAM permissions to run this API, see Generating the account status report for declarative policies in the Amazon Web Services Organizations User Guide. /// /// Parameters: /// - dryRun: Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. - /// - s3Bucket: The name of the S3 bucket where the report will be saved. 
+ /// - s3Bucket: The name of the S3 bucket where the report will be saved. The bucket must be in the same Region where the report generation request is made. /// - s3Prefix: The prefix for your S3 object. /// - tagSpecifications: The tags to apply. /// - targetId: The root ID, organizational unit ID, or account ID. Format: For root: r-ab12 For OU: ou-ab12-cdef1234 For account: 123456789012 @@ -28415,7 +28459,7 @@ extension EC2 { /// /// - Parameters: /// - dryRun: Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. - /// - filters: One or more filters. Filter names and values are case-sensitive. auto-recovery-supported - Indicates whether Amazon CloudWatch action based recovery is supported (true | false). bare-metal - Indicates whether it is a bare metal instance type (true | false). burstable-performance-supported - Indicates whether the instance type is a burstable performance T instance type (true | false). current-generation - Indicates whether this instance type is the latest generation instance type of an instance family (true | false). ebs-info.ebs-optimized-info.baseline-bandwidth-in-mbps - The baseline bandwidth performance for an EBS-optimized instance type, in Mbps. ebs-info.ebs-optimized-info.baseline-iops - The baseline input/output storage operations per second for an EBS-optimized instance type. ebs-info.ebs-optimized-info.baseline-throughput-in-mbps - The baseline throughput performance for an EBS-optimized instance type, in MB/s. ebs-info.ebs-optimized-info.maximum-bandwidth-in-mbps - The maximum bandwidth performance for an EBS-optimized instance type, in Mbps. ebs-info.ebs-optimized-info.maximum-iops - The maximum input/output storage operations per second for an EBS-optimized instance type. 
ebs-info.ebs-optimized-info.maximum-throughput-in-mbps - The maximum throughput performance for an EBS-optimized instance type, in MB/s. ebs-info.ebs-optimized-support - Indicates whether the instance type is EBS-optimized (supported | unsupported | default). ebs-info.encryption-support - Indicates whether EBS encryption is supported (supported | unsupported). ebs-info.nvme-support - Indicates whether non-volatile memory express (NVMe) is supported for EBS volumes (required | supported | unsupported). free-tier-eligible - Indicates whether the instance type is eligible to use in the free tier (true | false). hibernation-supported - Indicates whether On-Demand hibernation is supported (true | false). hypervisor - The hypervisor (nitro | xen). instance-storage-info.disk.count - The number of local disks. instance-storage-info.disk.size-in-gb - The storage size of each instance storage disk, in GB. instance-storage-info.disk.type - The storage technology for the local instance storage disks (hdd | ssd). instance-storage-info.encryption-support - Indicates whether data is encrypted at rest (required | supported | unsupported). instance-storage-info.nvme-support - Indicates whether non-volatile memory express (NVMe) is supported for instance store (required | supported | unsupported). instance-storage-info.total-size-in-gb - The total amount of storage available from all local instance storage, in GB. instance-storage-supported - Indicates whether the instance type has local instance storage (true | false). instance-type - The instance type (for example c5.2xlarge or c5*). memory-info.size-in-mib - The memory size. network-info.efa-info.maximum-efa-interfaces - The maximum number of Elastic Fabric Adapters (EFAs) per instance. network-info.efa-supported - Indicates whether the instance type supports Elastic Fabric Adapter (EFA) (true | false). 
network-info.ena-support - Indicates whether Elastic Network Adapter (ENA) is supported or required (required | supported | unsupported). network-info.encryption-in-transit-supported - Indicates whether the instance type automatically encrypts in-transit traffic between instances (true | false). network-info.ipv4-addresses-per-interface - The maximum number of private IPv4 addresses per network interface. network-info.ipv6-addresses-per-interface - The maximum number of private IPv6 addresses per network interface. network-info.ipv6-supported - Indicates whether the instance type supports IPv6 (true | false). network-info.maximum-network-cards - The maximum number of network cards per instance. network-info.maximum-network-interfaces - The maximum number of network interfaces per instance. network-info.network-performance - The network performance (for example, "25 Gigabit"). nitro-enclaves-support - Indicates whether Nitro Enclaves is supported (supported | unsupported). nitro-tpm-support - Indicates whether NitroTPM is supported (supported | unsupported). nitro-tpm-info.supported-versions - The supported NitroTPM version (2.0). processor-info.supported-architecture - The CPU architecture (arm64 | i386 | x86_64). processor-info.sustained-clock-speed-in-ghz - The CPU clock speed, in GHz. processor-info.supported-features - The supported CPU features (amd-sev-snp). supported-boot-mode - The boot mode (legacy-bios | uefi). supported-root-device-type - The root device type (ebs | instance-store). supported-usage-class - The usage class (on-demand | spot | capacity-block). supported-virtualization-type - The virtualization type (hvm | paravirtual). vcpu-info.default-cores - The default number of cores for the instance type. vcpu-info.default-threads-per-core - The default number of threads per core for the instance type. vcpu-info.default-vcpus - The default number of vCPUs for the instance type. 
vcpu-info.valid-cores - The number of cores that can be configured for the instance type. vcpu-info.valid-threads-per-core - The number of threads per core that can be configured for the instance type. For example, "1" or "1,2". + /// - filters: One or more filters. Filter names and values are case-sensitive. auto-recovery-supported - Indicates whether Amazon CloudWatch action based recovery is supported (true | false). bare-metal - Indicates whether it is a bare metal instance type (true | false). burstable-performance-supported - Indicates whether the instance type is a burstable performance T instance type (true | false). current-generation - Indicates whether this instance type is the latest generation instance type of an instance family (true | false). ebs-info.ebs-optimized-info.baseline-bandwidth-in-mbps - The baseline bandwidth performance for an EBS-optimized instance type, in Mbps. ebs-info.ebs-optimized-info.baseline-iops - The baseline input/output storage operations per second for an EBS-optimized instance type. ebs-info.ebs-optimized-info.baseline-throughput-in-mbps - The baseline throughput performance for an EBS-optimized instance type, in MB/s. ebs-info.ebs-optimized-info.maximum-bandwidth-in-mbps - The maximum bandwidth performance for an EBS-optimized instance type, in Mbps. ebs-info.ebs-optimized-info.maximum-iops - The maximum input/output storage operations per second for an EBS-optimized instance type. ebs-info.ebs-optimized-info.maximum-throughput-in-mbps - The maximum throughput performance for an EBS-optimized instance type, in MB/s. ebs-info.ebs-optimized-support - Indicates whether the instance type is EBS-optimized (supported | unsupported | default). ebs-info.encryption-support - Indicates whether EBS encryption is supported (supported | unsupported). ebs-info.nvme-support - Indicates whether non-volatile memory express (NVMe) is supported for EBS volumes (required | supported | unsupported). 
free-tier-eligible - Indicates whether the instance type is eligible to use in the free tier (true | false). hibernation-supported - Indicates whether On-Demand hibernation is supported (true | false). hypervisor - The hypervisor (nitro | xen). instance-storage-info.disk.count - The number of local disks. instance-storage-info.disk.size-in-gb - The storage size of each instance storage disk, in GB. instance-storage-info.disk.type - The storage technology for the local instance storage disks (hdd | ssd). instance-storage-info.encryption-support - Indicates whether data is encrypted at rest (required | supported | unsupported). instance-storage-info.nvme-support - Indicates whether non-volatile memory express (NVMe) is supported for instance store (required | supported | unsupported). instance-storage-info.total-size-in-gb - The total amount of storage available from all local instance storage, in GB. instance-storage-supported - Indicates whether the instance type has local instance storage (true | false). instance-type - The instance type (for example c5.2xlarge or c5*). memory-info.size-in-mib - The memory size. network-info.bandwidth-weightings - For instances that support bandwidth weighting to boost performance (default, vpc-1, ebs-1). network-info.efa-info.maximum-efa-interfaces - The maximum number of Elastic Fabric Adapters (EFAs) per instance. network-info.efa-supported - Indicates whether the instance type supports Elastic Fabric Adapter (EFA) (true | false). network-info.ena-support - Indicates whether Elastic Network Adapter (ENA) is supported or required (required | supported | unsupported). network-info.encryption-in-transit-supported - Indicates whether the instance type automatically encrypts in-transit traffic between instances (true | false). network-info.ipv4-addresses-per-interface - The maximum number of private IPv4 addresses per network interface. 
network-info.ipv6-addresses-per-interface - The maximum number of private IPv6 addresses per network interface. network-info.ipv6-supported - Indicates whether the instance type supports IPv6 (true | false). network-info.maximum-network-cards - The maximum number of network cards per instance. network-info.maximum-network-interfaces - The maximum number of network interfaces per instance. network-info.network-performance - The network performance (for example, "25 Gigabit"). nitro-enclaves-support - Indicates whether Nitro Enclaves is supported (supported | unsupported). nitro-tpm-support - Indicates whether NitroTPM is supported (supported | unsupported). nitro-tpm-info.supported-versions - The supported NitroTPM version (2.0). processor-info.supported-architecture - The CPU architecture (arm64 | i386 | x86_64). processor-info.sustained-clock-speed-in-ghz - The CPU clock speed, in GHz. processor-info.supported-features - The supported CPU features (amd-sev-snp). supported-boot-mode - The boot mode (legacy-bios | uefi). supported-root-device-type - The root device type (ebs | instance-store). supported-usage-class - The usage class (on-demand | spot | capacity-block). supported-virtualization-type - The virtualization type (hvm | paravirtual). vcpu-info.default-cores - The default number of cores for the instance type. vcpu-info.default-threads-per-core - The default number of threads per core for the instance type. vcpu-info.default-vcpus - The default number of vCPUs for the instance type. vcpu-info.valid-cores - The number of cores that can be configured for the instance type. vcpu-info.valid-threads-per-core - The number of threads per core that can be configured for the instance type. For example, "1" or "1,2". /// - instanceTypes: The instance types. /// - maxResults: The maximum number of items to return for this request. To get the next page of items, make another request with the token returned in the output. 
/// - logger: Logger used for logging @@ -28458,7 +28502,7 @@ extension EC2 { /// /// - Parameters: /// - dryRun: Checks whether you have the required permissions for the operation, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. - /// - filters: The filters. affinity - The affinity setting for an instance running on a Dedicated Host (default | host). architecture - The instance architecture (i386 | x86_64 | arm64). availability-zone - The Availability Zone of the instance. block-device-mapping.attach-time - The attach time for an EBS volume mapped to the instance, for example, 2022-09-15T17:15:20.000Z. block-device-mapping.delete-on-termination - A Boolean that indicates whether the EBS volume is deleted on instance termination. block-device-mapping.device-name - The device name specified in the block device mapping (for example, /dev/sdh or xvdh). block-device-mapping.status - The status for the EBS volume (attaching | attached | detaching | detached). block-device-mapping.volume-id - The volume ID of the EBS volume. boot-mode - The boot mode that was specified by the AMI (legacy-bios | uefi | uefi-preferred). capacity-reservation-id - The ID of the Capacity Reservation into which the instance was launched. capacity-reservation-specification.capacity-reservation-preference - The instance's Capacity Reservation preference (open | none). capacity-reservation-specification.capacity-reservation-target.capacity-reservation-id - The ID of the targeted Capacity Reservation. capacity-reservation-specification.capacity-reservation-target.capacity-reservation-resource-group-arn - The ARN of the targeted Capacity Reservation group. client-token - The idempotency token you provided when you launched the instance. current-instance-boot-mode - The boot mode that is used to launch the instance at launch or start (legacy-bios | uefi). 
dns-name - The public DNS name of the instance. ebs-optimized - A Boolean that indicates whether the instance is optimized for Amazon EBS I/O. ena-support - A Boolean that indicates whether the instance is enabled for enhanced networking with ENA. enclave-options.enabled - A Boolean that indicates whether the instance is enabled for Amazon Web Services Nitro Enclaves. hibernation-options.configured - A Boolean that indicates whether the instance is enabled for hibernation. A value of true means that the instance is enabled for hibernation. host-id - The ID of the Dedicated Host on which the instance is running, if applicable. hypervisor - The hypervisor type of the instance (ovm | xen). The value xen is used for both Xen and Nitro hypervisors. iam-instance-profile.arn - The instance profile associated with the instance. Specified as an ARN. iam-instance-profile.id - The instance profile associated with the instance. Specified as an ID. image-id - The ID of the image used to launch the instance. instance-id - The ID of the instance. instance-lifecycle - Indicates whether this is a Spot Instance, a Scheduled Instance, or a Capacity Block (spot | scheduled | capacity-block). instance-state-code - The state of the instance, as a 16-bit unsigned integer. The high byte is used for internal purposes and should be ignored. The low byte is set based on the state represented. The valid values are: 0 (pending), 16 (running), 32 (shutting-down), 48 (terminated), 64 (stopping), and 80 (stopped). instance-state-name - The state of the instance (pending | running | shutting-down | terminated | stopping | stopped). instance-type - The type of instance (for example, t2.micro). instance.group-id - The ID of the security group for the instance. instance.group-name - The name of the security group for the instance. ip-address - The public IPv4 address of the instance. ipv6-address - The IPv6 address of the instance. kernel-id - The kernel ID. 
key-name - The name of the key pair used when the instance was launched. launch-index - When launching multiple instances, this is the index for the instance in the launch group (for example, 0, 1, 2, and so on). launch-time - The time when the instance was launched, in the ISO 8601 format in the UTC time zone (YYYY-MM-DDThh:mm:ss.sssZ), for example, 2021-09-29T11:04:43.305Z. You can use a wildcard (*), for example, 2021-09-29T*, which matches an entire day. maintenance-options.auto-recovery - The current automatic recovery behavior of the instance (disabled | default). metadata-options.http-endpoint - The status of access to the HTTP metadata endpoint on your instance (enabled | disabled) metadata-options.http-protocol-ipv4 - Indicates whether the IPv4 endpoint is enabled (disabled | enabled). metadata-options.http-protocol-ipv6 - Indicates whether the IPv6 endpoint is enabled (disabled | enabled). metadata-options.http-put-response-hop-limit - The HTTP metadata request put response hop limit (integer, possible values 1 to 64) metadata-options.http-tokens - The metadata request authorization state (optional | required) metadata-options.instance-metadata-tags - The status of access to instance tags from the instance metadata (enabled | disabled) metadata-options.state - The state of the metadata option changes (pending | applied). monitoring-state - Indicates whether detailed monitoring is enabled (disabled | enabled). network-interface.addresses.association.allocation-id - The allocation ID. network-interface.addresses.association.association-id - The association ID. network-interface.addresses.association.carrier-ip - The carrier IP address. network-interface.addresses.association.customer-owned-ip - The customer-owned IP address. network-interface.addresses.association.ip-owner-id - The owner ID of the private IPv4 address associated with the network interface. network-interface.addresses.association.public-dns-name - The public DNS name. 
network-interface.addresses.association.public-ip - The ID of the association of an Elastic IP address (IPv4) with a network interface. network-interface.addresses.primary - Specifies whether the IPv4 address of the network interface is the primary private IPv4 address. network-interface.addresses.private-dns-name - The private DNS name. network-interface.addresses.private-ip-address - The private IPv4 address associated with the network interface. network-interface.association.allocation-id - The allocation ID returned when you allocated the Elastic IP address (IPv4) for your network interface. network-interface.association.association-id - The association ID returned when the network interface was associated with an IPv4 address. network-interface.association.carrier-ip - The carrier IP address. network-interface.association.customer-owned-ip - The customer-owned IP address. network-interface.association.ip-owner-id - The owner of the Elastic IP address (IPv4) associated with the network interface. network-interface.association.public-dns-name - The public DNS name. network-interface.association.public-ip - The address of the Elastic IP address (IPv4) bound to the network interface. network-interface.attachment.attach-time - The time that the network interface was attached to an instance. network-interface.attachment.attachment-id - The ID of the interface attachment. network-interface.attachment.delete-on-termination - Specifies whether the attachment is deleted when an instance is terminated. network-interface.attachment.device-index - The device index to which the network interface is attached. network-interface.attachment.instance-id - The ID of the instance to which the network interface is attached. network-interface.attachment.instance-owner-id - The owner ID of the instance to which the network interface is attached. network-interface.attachment.network-card-index - The index of the network card. 
network-interface.attachment.status - The status of the attachment (attaching | attached | detaching | detached). network-interface.availability-zone - The Availability Zone for the network interface. network-interface.deny-all-igw-traffic - A Boolean that indicates whether a network interface with an IPv6 address is unreachable from the public internet. network-interface.description - The description of the network interface. network-interface.group-id - The ID of a security group associated with the network interface. network-interface.group-name - The name of a security group associated with the network interface. network-interface.ipv4-prefixes.ipv4-prefix - The IPv4 prefixes that are assigned to the network interface. network-interface.ipv6-address - The IPv6 address associated with the network interface. network-interface.ipv6-addresses.ipv6-address - The IPv6 address associated with the network interface. network-interface.ipv6-addresses.is-primary-ipv6 - A Boolean that indicates whether this is the primary IPv6 address. network-interface.ipv6-native - A Boolean that indicates whether this is an IPv6 only network interface. network-interface.ipv6-prefixes.ipv6-prefix - The IPv6 prefix assigned to the network interface. network-interface.mac-address - The MAC address of the network interface. network-interface.network-interface-id - The ID of the network interface. network-interface.operator.managed - A Boolean that indicates whether the instance has a managed network interface. network-interface.operator.principal - The principal that manages the network interface. Only valid for instances with managed network interfaces, where managed is true. network-interface.outpost-arn - The ARN of the Outpost. network-interface.owner-id - The ID of the owner of the network interface. network-interface.private-dns-name - The private DNS name of the network interface. network-interface.private-ip-address - The private IPv4 address. 
network-interface.public-dns-name - The public DNS name. network-interface.requester-id - The requester ID for the network interface. network-interface.requester-managed - Indicates whether the network interface is being managed by Amazon Web Services. network-interface.status - The status of the network interface (available | in-use). network-interface.source-dest-check - Whether the network interface performs source/destination checking. A value of true means that checking is enabled, and false means that checking is disabled. The value must be false for the network interface to perform network address translation (NAT) in your VPC. network-interface.subnet-id - The ID of the subnet for the network interface. network-interface.tag-key - The key of a tag assigned to the network interface. network-interface.tag-value - The value of a tag assigned to the network interface. network-interface.vpc-id - The ID of the VPC for the network interface. operator.managed - A Boolean that indicates whether this is a managed instance. operator.principal - The principal that manages the instance. Only valid for managed instances, where managed is true. outpost-arn - The Amazon Resource Name (ARN) of the Outpost. owner-id - The Amazon Web Services account ID of the instance owner. placement-group-name - The name of the placement group for the instance. placement-partition-number - The partition in which the instance is located. platform - The platform. To list only Windows instances, use windows. 
platform-details - The platform (Linux/UNIX | Red Hat BYOL Linux | Red Hat Enterprise Linux | Red Hat Enterprise Linux with HA | Red Hat Enterprise Linux with SQL Server Standard and HA | Red Hat Enterprise Linux with SQL Server Enterprise and HA | Red Hat Enterprise Linux with SQL Server Standard | Red Hat Enterprise Linux with SQL Server Web | Red Hat Enterprise Linux with SQL Server Enterprise | SQL Server Enterprise | SQL Server Standard | SQL Server Web | SUSE Linux | Ubuntu Pro | Windows | Windows BYOL | Windows with SQL Server Enterprise | Windows with SQL Server Standard | Windows with SQL Server Web). private-dns-name - The private IPv4 DNS name of the instance. private-dns-name-options.enable-resource-name-dns-a-record - A Boolean that indicates whether to respond to DNS queries for instance hostnames with DNS A records. private-dns-name-options.enable-resource-name-dns-aaaa-record - A Boolean that indicates whether to respond to DNS queries for instance hostnames with DNS AAAA records. private-dns-name-options.hostname-type - The type of hostname (ip-name | resource-name). private-ip-address - The private IPv4 address of the instance. This can only be used to filter by the primary IP address of the network interface attached to the instance. To filter by additional IP addresses assigned to the network interface, use the filter network-interface.addresses.private-ip-address. product-code - The product code associated with the AMI used to launch the instance. product-code.type - The type of product code (devpay | marketplace). ramdisk-id - The RAM disk ID. reason - The reason for the current state of the instance (for example, shows "User Initiated [date]" when you stop or terminate the instance). Similar to the state-reason-code filter. requester-id - The ID of the entity that launched the instance on your behalf (for example, Amazon Web Services Management Console, Auto Scaling, and so on). reservation-id - The ID of the instance's reservation. 
A reservation ID is created any time you launch an instance. A reservation ID has a one-to-one relationship with an instance launch request, but can be associated with more than one instance if you launch multiple instances using the same launch request. For example, if you launch one instance, you get one reservation ID. If you launch ten instances using the same launch request, you also get one reservation ID. root-device-name - The device name of the root device volume (for example, /dev/sda1). root-device-type - The type of the root device volume (ebs | instance-store). source-dest-check - Indicates whether the instance performs source/destination checking. A value of true means that checking is enabled, and false means that checking is disabled. The value must be false for the instance to perform network address translation (NAT) in your VPC. spot-instance-request-id - The ID of the Spot Instance request. state-reason-code - The reason code for the state change. state-reason-message - A message that describes the state change. subnet-id - The ID of the subnet for the instance. tag: - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key Owner and the value TeamA, specify tag:Owner for the filter name and TeamA for the filter value. tag-key - The key of a tag assigned to the resource. Use this filter to find all resources that have a tag with a specific key, regardless of the tag value. tenancy - The tenancy of an instance (dedicated | default | host). tpm-support - Indicates if the instance is configured for NitroTPM support (v2.0). 
usage-operation - The usage operation value for the instance (RunInstances | RunInstances:00g0 | RunInstances:0010 | RunInstances:1010 | RunInstances:1014 | RunInstances:1110 | RunInstances:0014 | RunInstances:0210 | RunInstances:0110 | RunInstances:0100 | RunInstances:0004 | RunInstances:0200 | RunInstances:000g | RunInstances:0g00 | RunInstances:0002 | RunInstances:0800 | RunInstances:0102 | RunInstances:0006 | RunInstances:0202). usage-operation-update-time - The time that the usage operation was last updated, for example, 2022-09-15T17:15:20.000Z. virtualization-type - The virtualization type of the instance (paravirtual | hvm). vpc-id - The ID of the VPC that the instance is running in. + /// - filters: The filters. affinity - The affinity setting for an instance running on a Dedicated Host (default | host). architecture - The instance architecture (i386 | x86_64 | arm64). availability-zone - The Availability Zone of the instance. block-device-mapping.attach-time - The attach time for an EBS volume mapped to the instance, for example, 2022-09-15T17:15:20.000Z. block-device-mapping.delete-on-termination - A Boolean that indicates whether the EBS volume is deleted on instance termination. block-device-mapping.device-name - The device name specified in the block device mapping (for example, /dev/sdh or xvdh). block-device-mapping.status - The status for the EBS volume (attaching | attached | detaching | detached). block-device-mapping.volume-id - The volume ID of the EBS volume. boot-mode - The boot mode that was specified by the AMI (legacy-bios | uefi | uefi-preferred). capacity-reservation-id - The ID of the Capacity Reservation into which the instance was launched. capacity-reservation-specification.capacity-reservation-preference - The instance's Capacity Reservation preference (open | none). capacity-reservation-specification.capacity-reservation-target.capacity-reservation-id - The ID of the targeted Capacity Reservation. 
capacity-reservation-specification.capacity-reservation-target.capacity-reservation-resource-group-arn - The ARN of the targeted Capacity Reservation group. client-token - The idempotency token you provided when you launched the instance. current-instance-boot-mode - The boot mode that is used to launch the instance at launch or start (legacy-bios | uefi). dns-name - The public DNS name of the instance. ebs-optimized - A Boolean that indicates whether the instance is optimized for Amazon EBS I/O. ena-support - A Boolean that indicates whether the instance is enabled for enhanced networking with ENA. enclave-options.enabled - A Boolean that indicates whether the instance is enabled for Amazon Web Services Nitro Enclaves. hibernation-options.configured - A Boolean that indicates whether the instance is enabled for hibernation. A value of true means that the instance is enabled for hibernation. host-id - The ID of the Dedicated Host on which the instance is running, if applicable. hypervisor - The hypervisor type of the instance (ovm | xen). The value xen is used for both Xen and Nitro hypervisors. iam-instance-profile.arn - The instance profile associated with the instance. Specified as an ARN. iam-instance-profile.id - The instance profile associated with the instance. Specified as an ID. image-id - The ID of the image used to launch the instance. instance-id - The ID of the instance. instance-lifecycle - Indicates whether this is a Spot Instance, a Scheduled Instance, or a Capacity Block (spot | scheduled | capacity-block). instance-state-code - The state of the instance, as a 16-bit unsigned integer. The high byte is used for internal purposes and should be ignored. The low byte is set based on the state represented. The valid values are: 0 (pending), 16 (running), 32 (shutting-down), 48 (terminated), 64 (stopping), and 80 (stopped). instance-state-name - The state of the instance (pending | running | shutting-down | terminated | stopping | stopped). 
instance-type - The type of instance (for example, t2.micro). instance.group-id - The ID of the security group for the instance. instance.group-name - The name of the security group for the instance. ip-address - The public IPv4 address of the instance. ipv6-address - The IPv6 address of the instance. kernel-id - The kernel ID. key-name - The name of the key pair used when the instance was launched. launch-index - When launching multiple instances, this is the index for the instance in the launch group (for example, 0, 1, 2, and so on). launch-time - The time when the instance was launched, in the ISO 8601 format in the UTC time zone (YYYY-MM-DDThh:mm:ss.sssZ), for example, 2021-09-29T11:04:43.305Z. You can use a wildcard (*), for example, 2021-09-29T*, which matches an entire day. maintenance-options.auto-recovery - The current automatic recovery behavior of the instance (disabled | default). metadata-options.http-endpoint - The status of access to the HTTP metadata endpoint on your instance (enabled | disabled) metadata-options.http-protocol-ipv4 - Indicates whether the IPv4 endpoint is enabled (disabled | enabled). metadata-options.http-protocol-ipv6 - Indicates whether the IPv6 endpoint is enabled (disabled | enabled). metadata-options.http-put-response-hop-limit - The HTTP metadata request put response hop limit (integer, possible values 1 to 64) metadata-options.http-tokens - The metadata request authorization state (optional | required) metadata-options.instance-metadata-tags - The status of access to instance tags from the instance metadata (enabled | disabled) metadata-options.state - The state of the metadata option changes (pending | applied). monitoring-state - Indicates whether detailed monitoring is enabled (disabled | enabled). network-interface.addresses.association.allocation-id - The allocation ID. network-interface.addresses.association.association-id - The association ID. 
network-interface.addresses.association.carrier-ip - The carrier IP address. network-interface.addresses.association.customer-owned-ip - The customer-owned IP address. network-interface.addresses.association.ip-owner-id - The owner ID of the private IPv4 address associated with the network interface. network-interface.addresses.association.public-dns-name - The public DNS name. network-interface.addresses.association.public-ip - The ID of the association of an Elastic IP address (IPv4) with a network interface. network-interface.addresses.primary - Specifies whether the IPv4 address of the network interface is the primary private IPv4 address. network-interface.addresses.private-dns-name - The private DNS name. network-interface.addresses.private-ip-address - The private IPv4 address associated with the network interface. network-interface.association.allocation-id - The allocation ID returned when you allocated the Elastic IP address (IPv4) for your network interface. network-interface.association.association-id - The association ID returned when the network interface was associated with an IPv4 address. network-interface.association.carrier-ip - The carrier IP address. network-interface.association.customer-owned-ip - The customer-owned IP address. network-interface.association.ip-owner-id - The owner of the Elastic IP address (IPv4) associated with the network interface. network-interface.association.public-dns-name - The public DNS name. network-interface.association.public-ip - The address of the Elastic IP address (IPv4) bound to the network interface. network-interface.attachment.attach-time - The time that the network interface was attached to an instance. network-interface.attachment.attachment-id - The ID of the interface attachment. network-interface.attachment.delete-on-termination - Specifies whether the attachment is deleted when an instance is terminated. 
network-interface.attachment.device-index - The device index to which the network interface is attached. network-interface.attachment.instance-id - The ID of the instance to which the network interface is attached. network-interface.attachment.instance-owner-id - The owner ID of the instance to which the network interface is attached. network-interface.attachment.network-card-index - The index of the network card. network-interface.attachment.status - The status of the attachment (attaching | attached | detaching | detached). network-interface.availability-zone - The Availability Zone for the network interface. network-interface.deny-all-igw-traffic - A Boolean that indicates whether a network interface with an IPv6 address is unreachable from the public internet. network-interface.description - The description of the network interface. network-interface.group-id - The ID of a security group associated with the network interface. network-interface.group-name - The name of a security group associated with the network interface. network-interface.ipv4-prefixes.ipv4-prefix - The IPv4 prefixes that are assigned to the network interface. network-interface.ipv6-address - The IPv6 address associated with the network interface. network-interface.ipv6-addresses.ipv6-address - The IPv6 address associated with the network interface. network-interface.ipv6-addresses.is-primary-ipv6 - A Boolean that indicates whether this is the primary IPv6 address. network-interface.ipv6-native - A Boolean that indicates whether this is an IPv6 only network interface. network-interface.ipv6-prefixes.ipv6-prefix - The IPv6 prefix assigned to the network interface. network-interface.mac-address - The MAC address of the network interface. network-interface.network-interface-id - The ID of the network interface. network-interface.operator.managed - A Boolean that indicates whether the instance has a managed network interface. 
network-interface.operator.principal - The principal that manages the network interface. Only valid for instances with managed network interfaces, where managed is true. network-interface.outpost-arn - The ARN of the Outpost. network-interface.owner-id - The ID of the owner of the network interface. network-interface.private-dns-name - The private DNS name of the network interface. network-interface.private-ip-address - The private IPv4 address. network-interface.public-dns-name - The public DNS name. network-interface.requester-id - The requester ID for the network interface. network-interface.requester-managed - Indicates whether the network interface is being managed by Amazon Web Services. network-interface.status - The status of the network interface (available | in-use). network-interface.source-dest-check - Whether the network interface performs source/destination checking. A value of true means that checking is enabled, and false means that checking is disabled. The value must be false for the network interface to perform network address translation (NAT) in your VPC. network-interface.subnet-id - The ID of the subnet for the network interface. network-interface.tag-key - The key of a tag assigned to the network interface. network-interface.tag-value - The value of a tag assigned to the network interface. network-interface.vpc-id - The ID of the VPC for the network interface. network-performance-options.bandwidth-weighting - Where the performance boost is applied, if applicable. Valid values: default, vpc-1, ebs-1. operator.managed - A Boolean that indicates whether this is a managed instance. operator.principal - The principal that manages the instance. Only valid for managed instances, where managed is true. outpost-arn - The Amazon Resource Name (ARN) of the Outpost. owner-id - The Amazon Web Services account ID of the instance owner. placement-group-name - The name of the placement group for the instance. 
placement-partition-number - The partition in which the instance is located. platform - The platform. To list only Windows instances, use windows. platform-details - The platform (Linux/UNIX | Red Hat BYOL Linux | Red Hat Enterprise Linux | Red Hat Enterprise Linux with HA | Red Hat Enterprise Linux with SQL Server Standard and HA | Red Hat Enterprise Linux with SQL Server Enterprise and HA | Red Hat Enterprise Linux with SQL Server Standard | Red Hat Enterprise Linux with SQL Server Web | Red Hat Enterprise Linux with SQL Server Enterprise | SQL Server Enterprise | SQL Server Standard | SQL Server Web | SUSE Linux | Ubuntu Pro | Windows | Windows BYOL | Windows with SQL Server Enterprise | Windows with SQL Server Standard | Windows with SQL Server Web). private-dns-name - The private IPv4 DNS name of the instance. private-dns-name-options.enable-resource-name-dns-a-record - A Boolean that indicates whether to respond to DNS queries for instance hostnames with DNS A records. private-dns-name-options.enable-resource-name-dns-aaaa-record - A Boolean that indicates whether to respond to DNS queries for instance hostnames with DNS AAAA records. private-dns-name-options.hostname-type - The type of hostname (ip-name | resource-name). private-ip-address - The private IPv4 address of the instance. This can only be used to filter by the primary IP address of the network interface attached to the instance. To filter by additional IP addresses assigned to the network interface, use the filter network-interface.addresses.private-ip-address. product-code - The product code associated with the AMI used to launch the instance. product-code.type - The type of product code (devpay | marketplace). ramdisk-id - The RAM disk ID. reason - The reason for the current state of the instance (for example, shows "User Initiated [date]" when you stop or terminate the instance). Similar to the state-reason-code filter. 
requester-id - The ID of the entity that launched the instance on your behalf (for example, Amazon Web Services Management Console, Auto Scaling, and so on). reservation-id - The ID of the instance's reservation. A reservation ID is created any time you launch an instance. A reservation ID has a one-to-one relationship with an instance launch request, but can be associated with more than one instance if you launch multiple instances using the same launch request. For example, if you launch one instance, you get one reservation ID. If you launch ten instances using the same launch request, you also get one reservation ID. root-device-name - The device name of the root device volume (for example, /dev/sda1). root-device-type - The type of the root device volume (ebs | instance-store). source-dest-check - Indicates whether the instance performs source/destination checking. A value of true means that checking is enabled, and false means that checking is disabled. The value must be false for the instance to perform network address translation (NAT) in your VPC. spot-instance-request-id - The ID of the Spot Instance request. state-reason-code - The reason code for the state change. state-reason-message - A message that describes the state change. subnet-id - The ID of the subnet for the instance. tag: - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key Owner and the value TeamA, specify tag:Owner for the filter name and TeamA for the filter value. tag-key - The key of a tag assigned to the resource. Use this filter to find all resources that have a tag with a specific key, regardless of the tag value. tenancy - The tenancy of an instance (dedicated | default | host). tpm-support - Indicates if the instance is configured for NitroTPM support (v2.0). 
usage-operation - The usage operation value for the instance (RunInstances | RunInstances:00g0 | RunInstances:0010 | RunInstances:1010 | RunInstances:1014 | RunInstances:1110 | RunInstances:0014 | RunInstances:0210 | RunInstances:0110 | RunInstances:0100 | RunInstances:0004 | RunInstances:0200 | RunInstances:000g | RunInstances:0g00 | RunInstances:0002 | RunInstances:0800 | RunInstances:0102 | RunInstances:0006 | RunInstances:0202). usage-operation-update-time - The time that the usage operation was last updated, for example, 2022-09-15T17:15:20.000Z. virtualization-type - The virtualization type of the instance (paravirtual | hvm). vpc-id - The ID of the VPC that the instance is running in. /// - instanceIds: The instance IDs. Default: Describes all your instances. /// - maxResults: The maximum number of items to return for this request. To get the next page of items, make another request with the token returned in the output. /// - logger: Logger used for logging @@ -35358,7 +35402,7 @@ extension EC2 { /// /// - Parameters: /// - dryRun: Checks whether you have the required permissions for the operation, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. - /// - filters: The filters. affinity - The affinity setting for an instance running on a Dedicated Host (default | host). architecture - The instance architecture (i386 | x86_64 | arm64). availability-zone - The Availability Zone of the instance. block-device-mapping.attach-time - The attach time for an EBS volume mapped to the instance, for example, 2022-09-15T17:15:20.000Z. block-device-mapping.delete-on-termination - A Boolean that indicates whether the EBS volume is deleted on instance termination. block-device-mapping.device-name - The device name specified in the block device mapping (for example, /dev/sdh or xvdh). 
block-device-mapping.status - The status for the EBS volume (attaching | attached | detaching | detached). block-device-mapping.volume-id - The volume ID of the EBS volume. boot-mode - The boot mode that was specified by the AMI (legacy-bios | uefi | uefi-preferred). capacity-reservation-id - The ID of the Capacity Reservation into which the instance was launched. capacity-reservation-specification.capacity-reservation-preference - The instance's Capacity Reservation preference (open | none). capacity-reservation-specification.capacity-reservation-target.capacity-reservation-id - The ID of the targeted Capacity Reservation. capacity-reservation-specification.capacity-reservation-target.capacity-reservation-resource-group-arn - The ARN of the targeted Capacity Reservation group. client-token - The idempotency token you provided when you launched the instance. current-instance-boot-mode - The boot mode that is used to launch the instance at launch or start (legacy-bios | uefi). dns-name - The public DNS name of the instance. ebs-optimized - A Boolean that indicates whether the instance is optimized for Amazon EBS I/O. ena-support - A Boolean that indicates whether the instance is enabled for enhanced networking with ENA. enclave-options.enabled - A Boolean that indicates whether the instance is enabled for Amazon Web Services Nitro Enclaves. hibernation-options.configured - A Boolean that indicates whether the instance is enabled for hibernation. A value of true means that the instance is enabled for hibernation. host-id - The ID of the Dedicated Host on which the instance is running, if applicable. hypervisor - The hypervisor type of the instance (ovm | xen). The value xen is used for both Xen and Nitro hypervisors. iam-instance-profile.arn - The instance profile associated with the instance. Specified as an ARN. iam-instance-profile.id - The instance profile associated with the instance. Specified as an ID. 
image-id - The ID of the image used to launch the instance. instance-id - The ID of the instance. instance-lifecycle - Indicates whether this is a Spot Instance, a Scheduled Instance, or a Capacity Block (spot | scheduled | capacity-block). instance-state-code - The state of the instance, as a 16-bit unsigned integer. The high byte is used for internal purposes and should be ignored. The low byte is set based on the state represented. The valid values are: 0 (pending), 16 (running), 32 (shutting-down), 48 (terminated), 64 (stopping), and 80 (stopped). instance-state-name - The state of the instance (pending | running | shutting-down | terminated | stopping | stopped). instance-type - The type of instance (for example, t2.micro). instance.group-id - The ID of the security group for the instance. instance.group-name - The name of the security group for the instance. ip-address - The public IPv4 address of the instance. ipv6-address - The IPv6 address of the instance. kernel-id - The kernel ID. key-name - The name of the key pair used when the instance was launched. launch-index - When launching multiple instances, this is the index for the instance in the launch group (for example, 0, 1, 2, and so on). launch-time - The time when the instance was launched, in the ISO 8601 format in the UTC time zone (YYYY-MM-DDThh:mm:ss.sssZ), for example, 2021-09-29T11:04:43.305Z. You can use a wildcard (*), for example, 2021-09-29T*, which matches an entire day. maintenance-options.auto-recovery - The current automatic recovery behavior of the instance (disabled | default). metadata-options.http-endpoint - The status of access to the HTTP metadata endpoint on your instance (enabled | disabled) metadata-options.http-protocol-ipv4 - Indicates whether the IPv4 endpoint is enabled (disabled | enabled). metadata-options.http-protocol-ipv6 - Indicates whether the IPv6 endpoint is enabled (disabled | enabled). 
metadata-options.http-put-response-hop-limit - The HTTP metadata request put response hop limit (integer, possible values 1 to 64) metadata-options.http-tokens - The metadata request authorization state (optional | required) metadata-options.instance-metadata-tags - The status of access to instance tags from the instance metadata (enabled | disabled) metadata-options.state - The state of the metadata option changes (pending | applied). monitoring-state - Indicates whether detailed monitoring is enabled (disabled | enabled). network-interface.addresses.association.allocation-id - The allocation ID. network-interface.addresses.association.association-id - The association ID. network-interface.addresses.association.carrier-ip - The carrier IP address. network-interface.addresses.association.customer-owned-ip - The customer-owned IP address. network-interface.addresses.association.ip-owner-id - The owner ID of the private IPv4 address associated with the network interface. network-interface.addresses.association.public-dns-name - The public DNS name. network-interface.addresses.association.public-ip - The ID of the association of an Elastic IP address (IPv4) with a network interface. network-interface.addresses.primary - Specifies whether the IPv4 address of the network interface is the primary private IPv4 address. network-interface.addresses.private-dns-name - The private DNS name. network-interface.addresses.private-ip-address - The private IPv4 address associated with the network interface. network-interface.association.allocation-id - The allocation ID returned when you allocated the Elastic IP address (IPv4) for your network interface. network-interface.association.association-id - The association ID returned when the network interface was associated with an IPv4 address. network-interface.association.carrier-ip - The carrier IP address. network-interface.association.customer-owned-ip - The customer-owned IP address. 
network-interface.association.ip-owner-id - The owner of the Elastic IP address (IPv4) associated with the network interface. network-interface.association.public-dns-name - The public DNS name. network-interface.association.public-ip - The address of the Elastic IP address (IPv4) bound to the network interface. network-interface.attachment.attach-time - The time that the network interface was attached to an instance. network-interface.attachment.attachment-id - The ID of the interface attachment. network-interface.attachment.delete-on-termination - Specifies whether the attachment is deleted when an instance is terminated. network-interface.attachment.device-index - The device index to which the network interface is attached. network-interface.attachment.instance-id - The ID of the instance to which the network interface is attached. network-interface.attachment.instance-owner-id - The owner ID of the instance to which the network interface is attached. network-interface.attachment.network-card-index - The index of the network card. network-interface.attachment.status - The status of the attachment (attaching | attached | detaching | detached). network-interface.availability-zone - The Availability Zone for the network interface. network-interface.deny-all-igw-traffic - A Boolean that indicates whether a network interface with an IPv6 address is unreachable from the public internet. network-interface.description - The description of the network interface. network-interface.group-id - The ID of a security group associated with the network interface. network-interface.group-name - The name of a security group associated with the network interface. network-interface.ipv4-prefixes.ipv4-prefix - The IPv4 prefixes that are assigned to the network interface. network-interface.ipv6-address - The IPv6 address associated with the network interface. network-interface.ipv6-addresses.ipv6-address - The IPv6 address associated with the network interface. 
network-interface.ipv6-addresses.is-primary-ipv6 - A Boolean that indicates whether this is the primary IPv6 address. network-interface.ipv6-native - A Boolean that indicates whether this is an IPv6 only network interface. network-interface.ipv6-prefixes.ipv6-prefix - The IPv6 prefix assigned to the network interface. network-interface.mac-address - The MAC address of the network interface. network-interface.network-interface-id - The ID of the network interface. network-interface.operator.managed - A Boolean that indicates whether the instance has a managed network interface. network-interface.operator.principal - The principal that manages the network interface. Only valid for instances with managed network interfaces, where managed is true. network-interface.outpost-arn - The ARN of the Outpost. network-interface.owner-id - The ID of the owner of the network interface. network-interface.private-dns-name - The private DNS name of the network interface. network-interface.private-ip-address - The private IPv4 address. network-interface.public-dns-name - The public DNS name. network-interface.requester-id - The requester ID for the network interface. network-interface.requester-managed - Indicates whether the network interface is being managed by Amazon Web Services. network-interface.status - The status of the network interface (available | in-use). network-interface.source-dest-check - Whether the network interface performs source/destination checking. A value of true means that checking is enabled, and false means that checking is disabled. The value must be false for the network interface to perform network address translation (NAT) in your VPC. network-interface.subnet-id - The ID of the subnet for the network interface. network-interface.tag-key - The key of a tag assigned to the network interface. network-interface.tag-value - The value of a tag assigned to the network interface. network-interface.vpc-id - The ID of the VPC for the network interface. 
operator.managed - A Boolean that indicates whether this is a managed instance. operator.principal - The principal that manages the instance. Only valid for managed instances, where managed is true. outpost-arn - The Amazon Resource Name (ARN) of the Outpost. owner-id - The Amazon Web Services account ID of the instance owner. placement-group-name - The name of the placement group for the instance. placement-partition-number - The partition in which the instance is located. platform - The platform. To list only Windows instances, use windows. platform-details - The platform (Linux/UNIX | Red Hat BYOL Linux | Red Hat Enterprise Linux | Red Hat Enterprise Linux with HA | Red Hat Enterprise Linux with SQL Server Standard and HA | Red Hat Enterprise Linux with SQL Server Enterprise and HA | Red Hat Enterprise Linux with SQL Server Standard | Red Hat Enterprise Linux with SQL Server Web | Red Hat Enterprise Linux with SQL Server Enterprise | SQL Server Enterprise | SQL Server Standard | SQL Server Web | SUSE Linux | Ubuntu Pro | Windows | Windows BYOL | Windows with SQL Server Enterprise | Windows with SQL Server Standard | Windows with SQL Server Web). private-dns-name - The private IPv4 DNS name of the instance. private-dns-name-options.enable-resource-name-dns-a-record - A Boolean that indicates whether to respond to DNS queries for instance hostnames with DNS A records. private-dns-name-options.enable-resource-name-dns-aaaa-record - A Boolean that indicates whether to respond to DNS queries for instance hostnames with DNS AAAA records. private-dns-name-options.hostname-type - The type of hostname (ip-name | resource-name). private-ip-address - The private IPv4 address of the instance. This can only be used to filter by the primary IP address of the network interface attached to the instance. To filter by additional IP addresses assigned to the network interface, use the filter network-interface.addresses.private-ip-address. 
product-code - The product code associated with the AMI used to launch the instance. product-code.type - The type of product code (devpay | marketplace). ramdisk-id - The RAM disk ID. reason - The reason for the current state of the instance (for example, shows "User Initiated [date]" when you stop or terminate the instance). Similar to the state-reason-code filter. requester-id - The ID of the entity that launched the instance on your behalf (for example, Amazon Web Services Management Console, Auto Scaling, and so on). reservation-id - The ID of the instance's reservation. A reservation ID is created any time you launch an instance. A reservation ID has a one-to-one relationship with an instance launch request, but can be associated with more than one instance if you launch multiple instances using the same launch request. For example, if you launch one instance, you get one reservation ID. If you launch ten instances using the same launch request, you also get one reservation ID. root-device-name - The device name of the root device volume (for example, /dev/sda1). root-device-type - The type of the root device volume (ebs | instance-store). source-dest-check - Indicates whether the instance performs source/destination checking. A value of true means that checking is enabled, and false means that checking is disabled. The value must be false for the instance to perform network address translation (NAT) in your VPC. spot-instance-request-id - The ID of the Spot Instance request. state-reason-code - The reason code for the state change. state-reason-message - A message that describes the state change. subnet-id - The ID of the subnet for the instance. tag: - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key Owner and the value TeamA, specify tag:Owner for the filter name and TeamA for the filter value. 
tag-key - The key of a tag assigned to the resource. Use this filter to find all resources that have a tag with a specific key, regardless of the tag value. tenancy - The tenancy of an instance (dedicated | default | host). tpm-support - Indicates if the instance is configured for NitroTPM support (v2.0). usage-operation - The usage operation value for the instance (RunInstances | RunInstances:00g0 | RunInstances:0010 | RunInstances:1010 | RunInstances:1014 | RunInstances:1110 | RunInstances:0014 | RunInstances:0210 | RunInstances:0110 | RunInstances:0100 | RunInstances:0004 | RunInstances:0200 | RunInstances:000g | RunInstances:0g00 | RunInstances:0002 | RunInstances:0800 | RunInstances:0102 | RunInstances:0006 | RunInstances:0202). usage-operation-update-time - The time that the usage operation was last updated, for example, 2022-09-15T17:15:20.000Z. virtualization-type - The virtualization type of the instance (paravirtual | hvm). vpc-id - The ID of the VPC that the instance is running in. + /// - filters: The filters. affinity - The affinity setting for an instance running on a Dedicated Host (default | host). architecture - The instance architecture (i386 | x86_64 | arm64). availability-zone - The Availability Zone of the instance. block-device-mapping.attach-time - The attach time for an EBS volume mapped to the instance, for example, 2022-09-15T17:15:20.000Z. block-device-mapping.delete-on-termination - A Boolean that indicates whether the EBS volume is deleted on instance termination. block-device-mapping.device-name - The device name specified in the block device mapping (for example, /dev/sdh or xvdh). block-device-mapping.status - The status for the EBS volume (attaching | attached | detaching | detached). block-device-mapping.volume-id - The volume ID of the EBS volume. boot-mode - The boot mode that was specified by the AMI (legacy-bios | uefi | uefi-preferred). 
capacity-reservation-id - The ID of the Capacity Reservation into which the instance was launched. capacity-reservation-specification.capacity-reservation-preference - The instance's Capacity Reservation preference (open | none). capacity-reservation-specification.capacity-reservation-target.capacity-reservation-id - The ID of the targeted Capacity Reservation. capacity-reservation-specification.capacity-reservation-target.capacity-reservation-resource-group-arn - The ARN of the targeted Capacity Reservation group. client-token - The idempotency token you provided when you launched the instance. current-instance-boot-mode - The boot mode that is used to launch the instance at launch or start (legacy-bios | uefi). dns-name - The public DNS name of the instance. ebs-optimized - A Boolean that indicates whether the instance is optimized for Amazon EBS I/O. ena-support - A Boolean that indicates whether the instance is enabled for enhanced networking with ENA. enclave-options.enabled - A Boolean that indicates whether the instance is enabled for Amazon Web Services Nitro Enclaves. hibernation-options.configured - A Boolean that indicates whether the instance is enabled for hibernation. A value of true means that the instance is enabled for hibernation. host-id - The ID of the Dedicated Host on which the instance is running, if applicable. hypervisor - The hypervisor type of the instance (ovm | xen). The value xen is used for both Xen and Nitro hypervisors. iam-instance-profile.arn - The instance profile associated with the instance. Specified as an ARN. iam-instance-profile.id - The instance profile associated with the instance. Specified as an ID. image-id - The ID of the image used to launch the instance. instance-id - The ID of the instance. instance-lifecycle - Indicates whether this is a Spot Instance, a Scheduled Instance, or a Capacity Block (spot | scheduled | capacity-block). instance-state-code - The state of the instance, as a 16-bit unsigned integer. 
The high byte is used for internal purposes and should be ignored. The low byte is set based on the state represented. The valid values are: 0 (pending), 16 (running), 32 (shutting-down), 48 (terminated), 64 (stopping), and 80 (stopped). instance-state-name - The state of the instance (pending | running | shutting-down | terminated | stopping | stopped). instance-type - The type of instance (for example, t2.micro). instance.group-id - The ID of the security group for the instance. instance.group-name - The name of the security group for the instance. ip-address - The public IPv4 address of the instance. ipv6-address - The IPv6 address of the instance. kernel-id - The kernel ID. key-name - The name of the key pair used when the instance was launched. launch-index - When launching multiple instances, this is the index for the instance in the launch group (for example, 0, 1, 2, and so on). launch-time - The time when the instance was launched, in the ISO 8601 format in the UTC time zone (YYYY-MM-DDThh:mm:ss.sssZ), for example, 2021-09-29T11:04:43.305Z. You can use a wildcard (*), for example, 2021-09-29T*, which matches an entire day. maintenance-options.auto-recovery - The current automatic recovery behavior of the instance (disabled | default). metadata-options.http-endpoint - The status of access to the HTTP metadata endpoint on your instance (enabled | disabled) metadata-options.http-protocol-ipv4 - Indicates whether the IPv4 endpoint is enabled (disabled | enabled). metadata-options.http-protocol-ipv6 - Indicates whether the IPv6 endpoint is enabled (disabled | enabled). 
metadata-options.http-put-response-hop-limit - The HTTP metadata request put response hop limit (integer, possible values 1 to 64) metadata-options.http-tokens - The metadata request authorization state (optional | required) metadata-options.instance-metadata-tags - The status of access to instance tags from the instance metadata (enabled | disabled) metadata-options.state - The state of the metadata option changes (pending | applied). monitoring-state - Indicates whether detailed monitoring is enabled (disabled | enabled). network-interface.addresses.association.allocation-id - The allocation ID. network-interface.addresses.association.association-id - The association ID. network-interface.addresses.association.carrier-ip - The carrier IP address. network-interface.addresses.association.customer-owned-ip - The customer-owned IP address. network-interface.addresses.association.ip-owner-id - The owner ID of the private IPv4 address associated with the network interface. network-interface.addresses.association.public-dns-name - The public DNS name. network-interface.addresses.association.public-ip - The ID of the association of an Elastic IP address (IPv4) with a network interface. network-interface.addresses.primary - Specifies whether the IPv4 address of the network interface is the primary private IPv4 address. network-interface.addresses.private-dns-name - The private DNS name. network-interface.addresses.private-ip-address - The private IPv4 address associated with the network interface. network-interface.association.allocation-id - The allocation ID returned when you allocated the Elastic IP address (IPv4) for your network interface. network-interface.association.association-id - The association ID returned when the network interface was associated with an IPv4 address. network-interface.association.carrier-ip - The carrier IP address. network-interface.association.customer-owned-ip - The customer-owned IP address. 
network-interface.association.ip-owner-id - The owner of the Elastic IP address (IPv4) associated with the network interface. network-interface.association.public-dns-name - The public DNS name. network-interface.association.public-ip - The address of the Elastic IP address (IPv4) bound to the network interface. network-interface.attachment.attach-time - The time that the network interface was attached to an instance. network-interface.attachment.attachment-id - The ID of the interface attachment. network-interface.attachment.delete-on-termination - Specifies whether the attachment is deleted when an instance is terminated. network-interface.attachment.device-index - The device index to which the network interface is attached. network-interface.attachment.instance-id - The ID of the instance to which the network interface is attached. network-interface.attachment.instance-owner-id - The owner ID of the instance to which the network interface is attached. network-interface.attachment.network-card-index - The index of the network card. network-interface.attachment.status - The status of the attachment (attaching | attached | detaching | detached). network-interface.availability-zone - The Availability Zone for the network interface. network-interface.deny-all-igw-traffic - A Boolean that indicates whether a network interface with an IPv6 address is unreachable from the public internet. network-interface.description - The description of the network interface. network-interface.group-id - The ID of a security group associated with the network interface. network-interface.group-name - The name of a security group associated with the network interface. network-interface.ipv4-prefixes.ipv4-prefix - The IPv4 prefixes that are assigned to the network interface. network-interface.ipv6-address - The IPv6 address associated with the network interface. network-interface.ipv6-addresses.ipv6-address - The IPv6 address associated with the network interface. 
network-interface.ipv6-addresses.is-primary-ipv6 - A Boolean that indicates whether this is the primary IPv6 address. network-interface.ipv6-native - A Boolean that indicates whether this is an IPv6 only network interface. network-interface.ipv6-prefixes.ipv6-prefix - The IPv6 prefix assigned to the network interface. network-interface.mac-address - The MAC address of the network interface. network-interface.network-interface-id - The ID of the network interface. network-interface.operator.managed - A Boolean that indicates whether the instance has a managed network interface. network-interface.operator.principal - The principal that manages the network interface. Only valid for instances with managed network interfaces, where managed is true. network-interface.outpost-arn - The ARN of the Outpost. network-interface.owner-id - The ID of the owner of the network interface. network-interface.private-dns-name - The private DNS name of the network interface. network-interface.private-ip-address - The private IPv4 address. network-interface.public-dns-name - The public DNS name. network-interface.requester-id - The requester ID for the network interface. network-interface.requester-managed - Indicates whether the network interface is being managed by Amazon Web Services. network-interface.status - The status of the network interface (available | in-use). network-interface.source-dest-check - Whether the network interface performs source/destination checking. A value of true means that checking is enabled, and false means that checking is disabled. The value must be false for the network interface to perform network address translation (NAT) in your VPC. network-interface.subnet-id - The ID of the subnet for the network interface. network-interface.tag-key - The key of a tag assigned to the network interface. network-interface.tag-value - The value of a tag assigned to the network interface. network-interface.vpc-id - The ID of the VPC for the network interface. 
network-performance-options.bandwidth-weighting - Where the performance boost is applied, if applicable. Valid values: default, vpc-1, ebs-1. operator.managed - A Boolean that indicates whether this is a managed instance. operator.principal - The principal that manages the instance. Only valid for managed instances, where managed is true. outpost-arn - The Amazon Resource Name (ARN) of the Outpost. owner-id - The Amazon Web Services account ID of the instance owner. placement-group-name - The name of the placement group for the instance. placement-partition-number - The partition in which the instance is located. platform - The platform. To list only Windows instances, use windows. platform-details - The platform (Linux/UNIX | Red Hat BYOL Linux | Red Hat Enterprise Linux | Red Hat Enterprise Linux with HA | Red Hat Enterprise Linux with SQL Server Standard and HA | Red Hat Enterprise Linux with SQL Server Enterprise and HA | Red Hat Enterprise Linux with SQL Server Standard | Red Hat Enterprise Linux with SQL Server Web | Red Hat Enterprise Linux with SQL Server Enterprise | SQL Server Enterprise | SQL Server Standard | SQL Server Web | SUSE Linux | Ubuntu Pro | Windows | Windows BYOL | Windows with SQL Server Enterprise | Windows with SQL Server Standard | Windows with SQL Server Web). private-dns-name - The private IPv4 DNS name of the instance. private-dns-name-options.enable-resource-name-dns-a-record - A Boolean that indicates whether to respond to DNS queries for instance hostnames with DNS A records. private-dns-name-options.enable-resource-name-dns-aaaa-record - A Boolean that indicates whether to respond to DNS queries for instance hostnames with DNS AAAA records. private-dns-name-options.hostname-type - The type of hostname (ip-name | resource-name). private-ip-address - The private IPv4 address of the instance. This can only be used to filter by the primary IP address of the network interface attached to the instance. 
To filter by additional IP addresses assigned to the network interface, use the filter network-interface.addresses.private-ip-address. product-code - The product code associated with the AMI used to launch the instance. product-code.type - The type of product code (devpay | marketplace). ramdisk-id - The RAM disk ID. reason - The reason for the current state of the instance (for example, shows "User Initiated [date]" when you stop or terminate the instance). Similar to the state-reason-code filter. requester-id - The ID of the entity that launched the instance on your behalf (for example, Amazon Web Services Management Console, Auto Scaling, and so on). reservation-id - The ID of the instance's reservation. A reservation ID is created any time you launch an instance. A reservation ID has a one-to-one relationship with an instance launch request, but can be associated with more than one instance if you launch multiple instances using the same launch request. For example, if you launch one instance, you get one reservation ID. If you launch ten instances using the same launch request, you also get one reservation ID. root-device-name - The device name of the root device volume (for example, /dev/sda1). root-device-type - The type of the root device volume (ebs | instance-store). source-dest-check - Indicates whether the instance performs source/destination checking. A value of true means that checking is enabled, and false means that checking is disabled. The value must be false for the instance to perform network address translation (NAT) in your VPC. spot-instance-request-id - The ID of the Spot Instance request. state-reason-code - The reason code for the state change. state-reason-message - A message that describes the state change. subnet-id - The ID of the subnet for the instance. tag: - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. 
For example, to find all resources that have a tag with the key Owner and the value TeamA, specify tag:Owner for the filter name and TeamA for the filter value. tag-key - The key of a tag assigned to the resource. Use this filter to find all resources that have a tag with a specific key, regardless of the tag value. tenancy - The tenancy of an instance (dedicated | default | host). tpm-support - Indicates if the instance is configured for NitroTPM support (v2.0). usage-operation - The usage operation value for the instance (RunInstances | RunInstances:00g0 | RunInstances:0010 | RunInstances:1010 | RunInstances:1014 | RunInstances:1110 | RunInstances:0014 | RunInstances:0210 | RunInstances:0110 | RunInstances:0100 | RunInstances:0004 | RunInstances:0200 | RunInstances:000g | RunInstances:0g00 | RunInstances:0002 | RunInstances:0800 | RunInstances:0102 | RunInstances:0006 | RunInstances:0202). usage-operation-update-time - The time that the usage operation was last updated, for example, 2022-09-15T17:15:20.000Z. virtualization-type - The virtualization type of the instance (paravirtual | hvm). vpc-id - The ID of the VPC that the instance is running in. /// - instanceIds: The instance IDs. Default: Describes all your instances. /// - maxResults: The maximum number of items to return for this request. To get the next page of items, make another request with the token returned in the output. /// - nextToken: The token returned from a previous paginated request. Pagination continues from the end of the items returned by the previous request. @@ -35410,7 +35454,7 @@ extension EC2 { /// /// - Parameters: /// - dryRun: Checks whether you have the required permissions for the operation, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. - /// - filters: The filters. 
affinity - The affinity setting for an instance running on a Dedicated Host (default | host). architecture - The instance architecture (i386 | x86_64 | arm64). availability-zone - The Availability Zone of the instance. block-device-mapping.attach-time - The attach time for an EBS volume mapped to the instance, for example, 2022-09-15T17:15:20.000Z. block-device-mapping.delete-on-termination - A Boolean that indicates whether the EBS volume is deleted on instance termination. block-device-mapping.device-name - The device name specified in the block device mapping (for example, /dev/sdh or xvdh). block-device-mapping.status - The status for the EBS volume (attaching | attached | detaching | detached). block-device-mapping.volume-id - The volume ID of the EBS volume. boot-mode - The boot mode that was specified by the AMI (legacy-bios | uefi | uefi-preferred). capacity-reservation-id - The ID of the Capacity Reservation into which the instance was launched. capacity-reservation-specification.capacity-reservation-preference - The instance's Capacity Reservation preference (open | none). capacity-reservation-specification.capacity-reservation-target.capacity-reservation-id - The ID of the targeted Capacity Reservation. capacity-reservation-specification.capacity-reservation-target.capacity-reservation-resource-group-arn - The ARN of the targeted Capacity Reservation group. client-token - The idempotency token you provided when you launched the instance. current-instance-boot-mode - The boot mode that is used to launch the instance at launch or start (legacy-bios | uefi). dns-name - The public DNS name of the instance. ebs-optimized - A Boolean that indicates whether the instance is optimized for Amazon EBS I/O. ena-support - A Boolean that indicates whether the instance is enabled for enhanced networking with ENA. enclave-options.enabled - A Boolean that indicates whether the instance is enabled for Amazon Web Services Nitro Enclaves. 
hibernation-options.configured - A Boolean that indicates whether the instance is enabled for hibernation. A value of true means that the instance is enabled for hibernation. host-id - The ID of the Dedicated Host on which the instance is running, if applicable. hypervisor - The hypervisor type of the instance (ovm | xen). The value xen is used for both Xen and Nitro hypervisors. iam-instance-profile.arn - The instance profile associated with the instance. Specified as an ARN. iam-instance-profile.id - The instance profile associated with the instance. Specified as an ID. image-id - The ID of the image used to launch the instance. instance-id - The ID of the instance. instance-lifecycle - Indicates whether this is a Spot Instance, a Scheduled Instance, or a Capacity Block (spot | scheduled | capacity-block). instance-state-code - The state of the instance, as a 16-bit unsigned integer. The high byte is used for internal purposes and should be ignored. The low byte is set based on the state represented. The valid values are: 0 (pending), 16 (running), 32 (shutting-down), 48 (terminated), 64 (stopping), and 80 (stopped). instance-state-name - The state of the instance (pending | running | shutting-down | terminated | stopping | stopped). instance-type - The type of instance (for example, t2.micro). instance.group-id - The ID of the security group for the instance. instance.group-name - The name of the security group for the instance. ip-address - The public IPv4 address of the instance. ipv6-address - The IPv6 address of the instance. kernel-id - The kernel ID. key-name - The name of the key pair used when the instance was launched. launch-index - When launching multiple instances, this is the index for the instance in the launch group (for example, 0, 1, 2, and so on). launch-time - The time when the instance was launched, in the ISO 8601 format in the UTC time zone (YYYY-MM-DDThh:mm:ss.sssZ), for example, 2021-09-29T11:04:43.305Z. 
You can use a wildcard (*), for example, 2021-09-29T*, which matches an entire day. maintenance-options.auto-recovery - The current automatic recovery behavior of the instance (disabled | default). metadata-options.http-endpoint - The status of access to the HTTP metadata endpoint on your instance (enabled | disabled) metadata-options.http-protocol-ipv4 - Indicates whether the IPv4 endpoint is enabled (disabled | enabled). metadata-options.http-protocol-ipv6 - Indicates whether the IPv6 endpoint is enabled (disabled | enabled). metadata-options.http-put-response-hop-limit - The HTTP metadata request put response hop limit (integer, possible values 1 to 64) metadata-options.http-tokens - The metadata request authorization state (optional | required) metadata-options.instance-metadata-tags - The status of access to instance tags from the instance metadata (enabled | disabled) metadata-options.state - The state of the metadata option changes (pending | applied). monitoring-state - Indicates whether detailed monitoring is enabled (disabled | enabled). network-interface.addresses.association.allocation-id - The allocation ID. network-interface.addresses.association.association-id - The association ID. network-interface.addresses.association.carrier-ip - The carrier IP address. network-interface.addresses.association.customer-owned-ip - The customer-owned IP address. network-interface.addresses.association.ip-owner-id - The owner ID of the private IPv4 address associated with the network interface. network-interface.addresses.association.public-dns-name - The public DNS name. network-interface.addresses.association.public-ip - The ID of the association of an Elastic IP address (IPv4) with a network interface. network-interface.addresses.primary - Specifies whether the IPv4 address of the network interface is the primary private IPv4 address. network-interface.addresses.private-dns-name - The private DNS name. 
network-interface.addresses.private-ip-address - The private IPv4 address associated with the network interface. network-interface.association.allocation-id - The allocation ID returned when you allocated the Elastic IP address (IPv4) for your network interface. network-interface.association.association-id - The association ID returned when the network interface was associated with an IPv4 address. network-interface.association.carrier-ip - The customer-owned IP address. network-interface.association.customer-owned-ip - The customer-owned IP address. network-interface.association.ip-owner-id - The owner of the Elastic IP address (IPv4) associated with the network interface. network-interface.association.public-dns-name - The public DNS name. network-interface.association.public-ip - The address of the Elastic IP address (IPv4) bound to the network interface. network-interface.attachment.attach-time - The time that the network interface was attached to an instance. network-interface.attachment.attachment-id - The ID of the interface attachment. network-interface.attachment.delete-on-termination - Specifies whether the attachment is deleted when an instance is terminated. network-interface.attachment.device-index - The device index to which the network interface is attached. network-interface.attachment.instance-id - The ID of the instance to which the network interface is attached. network-interface.attachment.instance-owner-id - The owner ID of the instance to which the network interface is attached. network-interface.attachment.network-card-index - The index of the network card. network-interface.attachment.status - The status of the attachment (attaching | attached | detaching | detached). network-interface.availability-zone - The Availability Zone for the network interface. network-interface.deny-all-igw-traffic - A Boolean that indicates whether a network interface with an IPv6 address is unreachable from the public internet. 
network-interface.description - The description of the network interface. network-interface.group-id - The ID of a security group associated with the network interface. network-interface.group-name - The name of a security group associated with the network interface. network-interface.ipv4-prefixes.ipv4-prefix - The IPv4 prefixes that are assigned to the network interface. network-interface.ipv6-address - The IPv6 address associated with the network interface. network-interface.ipv6-addresses.ipv6-address - The IPv6 address associated with the network interface. network-interface.ipv6-addresses.is-primary-ipv6 - A Boolean that indicates whether this is the primary IPv6 address. network-interface.ipv6-native - A Boolean that indicates whether this is an IPv6 only network interface. network-interface.ipv6-prefixes.ipv6-prefix - The IPv6 prefix assigned to the network interface. network-interface.mac-address - The MAC address of the network interface. network-interface.network-interface-id - The ID of the network interface. network-interface.operator.managed - A Boolean that indicates whether the instance has a managed network interface. network-interface.operator.principal - The principal that manages the network interface. Only valid for instances with managed network interfaces, where managed is true. network-interface.outpost-arn - The ARN of the Outpost. network-interface.owner-id - The ID of the owner of the network interface. network-interface.private-dns-name - The private DNS name of the network interface. network-interface.private-ip-address - The private IPv4 address. network-interface.public-dns-name - The public DNS name. network-interface.requester-id - The requester ID for the network interface. network-interface.requester-managed - Indicates whether the network interface is being managed by Amazon Web Services. network-interface.status - The status of the network interface (available | in-use). 
network-interface.source-dest-check - Whether the network interface performs source/destination checking. A value of true means that checking is enabled, and false means that checking is disabled. The value must be false for the network interface to perform network address translation (NAT) in your VPC. network-interface.subnet-id - The ID of the subnet for the network interface. network-interface.tag-key - The key of a tag assigned to the network interface. network-interface.tag-value - The value of a tag assigned to the network interface. network-interface.vpc-id - The ID of the VPC for the network interface. operator.managed - A Boolean that indicates whether this is a managed instance. operator.principal - The principal that manages the instance. Only valid for managed instances, where managed is true. outpost-arn - The Amazon Resource Name (ARN) of the Outpost. owner-id - The Amazon Web Services account ID of the instance owner. placement-group-name - The name of the placement group for the instance. placement-partition-number - The partition in which the instance is located. platform - The platform. To list only Windows instances, use windows. platform-details - The platform (Linux/UNIX | Red Hat BYOL Linux | Red Hat Enterprise Linux | Red Hat Enterprise Linux with HA | Red Hat Enterprise Linux with SQL Server Standard and HA | Red Hat Enterprise Linux with SQL Server Enterprise and HA | Red Hat Enterprise Linux with SQL Server Standard | Red Hat Enterprise Linux with SQL Server Web | Red Hat Enterprise Linux with SQL Server Enterprise | SQL Server Enterprise | SQL Server Standard | SQL Server Web | SUSE Linux | Ubuntu Pro | Windows | Windows BYOL | Windows with SQL Server Enterprise | Windows with SQL Server Standard | Windows with SQL Server Web). private-dns-name - The private IPv4 DNS name of the instance. 
private-dns-name-options.enable-resource-name-dns-a-record - A Boolean that indicates whether to respond to DNS queries for instance hostnames with DNS A records. private-dns-name-options.enable-resource-name-dns-aaaa-record - A Boolean that indicates whether to respond to DNS queries for instance hostnames with DNS AAAA records. private-dns-name-options.hostname-type - The type of hostname (ip-name | resource-name). private-ip-address - The private IPv4 address of the instance. This can only be used to filter by the primary IP address of the network interface attached to the instance. To filter by additional IP addresses assigned to the network interface, use the filter network-interface.addresses.private-ip-address. product-code - The product code associated with the AMI used to launch the instance. product-code.type - The type of product code (devpay | marketplace). ramdisk-id - The RAM disk ID. reason - The reason for the current state of the instance (for example, shows "User Initiated [date]" when you stop or terminate the instance). Similar to the state-reason-code filter. requester-id - The ID of the entity that launched the instance on your behalf (for example, Amazon Web Services Management Console, Auto Scaling, and so on). reservation-id - The ID of the instance's reservation. A reservation ID is created any time you launch an instance. A reservation ID has a one-to-one relationship with an instance launch request, but can be associated with more than one instance if you launch multiple instances using the same launch request. For example, if you launch one instance, you get one reservation ID. If you launch ten instances using the same launch request, you also get one reservation ID. root-device-name - The device name of the root device volume (for example, /dev/sda1). root-device-type - The type of the root device volume (ebs | instance-store). source-dest-check - Indicates whether the instance performs source/destination checking. 
A value of true means that checking is enabled, and false means that checking is disabled. The value must be false for the instance to perform network address translation (NAT) in your VPC. spot-instance-request-id - The ID of the Spot Instance request. state-reason-code - The reason code for the state change. state-reason-message - A message that describes the state change. subnet-id - The ID of the subnet for the instance. tag: - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key Owner and the value TeamA, specify tag:Owner for the filter name and TeamA for the filter value. tag-key - The key of a tag assigned to the resource. Use this filter to find all resources that have a tag with a specific key, regardless of the tag value. tenancy - The tenancy of an instance (dedicated | default | host). tpm-support - Indicates if the instance is configured for NitroTPM support (v2.0). usage-operation - The usage operation value for the instance (RunInstances | RunInstances:00g0 | RunInstances:0010 | RunInstances:1010 | RunInstances:1014 | RunInstances:1110 | RunInstances:0014 | RunInstances:0210 | RunInstances:0110 | RunInstances:0100 | RunInstances:0004 | RunInstances:0200 | RunInstances:000g | RunInstances:0g00 | RunInstances:0002 | RunInstances:0800 | RunInstances:0102 | RunInstances:0006 | RunInstances:0202). usage-operation-update-time - The time that the usage operation was last updated, for example, 2022-09-15T17:15:20.000Z. virtualization-type - The virtualization type of the instance (paravirtual | hvm). vpc-id - The ID of the VPC that the instance is running in. + /// - filters: The filters. affinity - The affinity setting for an instance running on a Dedicated Host (default | host). architecture - The instance architecture (i386 | x86_64 | arm64). availability-zone - The Availability Zone of the instance. 
block-device-mapping.attach-time - The attach time for an EBS volume mapped to the instance, for example, 2022-09-15T17:15:20.000Z. block-device-mapping.delete-on-termination - A Boolean that indicates whether the EBS volume is deleted on instance termination. block-device-mapping.device-name - The device name specified in the block device mapping (for example, /dev/sdh or xvdh). block-device-mapping.status - The status for the EBS volume (attaching | attached | detaching | detached). block-device-mapping.volume-id - The volume ID of the EBS volume. boot-mode - The boot mode that was specified by the AMI (legacy-bios | uefi | uefi-preferred). capacity-reservation-id - The ID of the Capacity Reservation into which the instance was launched. capacity-reservation-specification.capacity-reservation-preference - The instance's Capacity Reservation preference (open | none). capacity-reservation-specification.capacity-reservation-target.capacity-reservation-id - The ID of the targeted Capacity Reservation. capacity-reservation-specification.capacity-reservation-target.capacity-reservation-resource-group-arn - The ARN of the targeted Capacity Reservation group. client-token - The idempotency token you provided when you launched the instance. current-instance-boot-mode - The boot mode that is used to launch the instance at launch or start (legacy-bios | uefi). dns-name - The public DNS name of the instance. ebs-optimized - A Boolean that indicates whether the instance is optimized for Amazon EBS I/O. ena-support - A Boolean that indicates whether the instance is enabled for enhanced networking with ENA. enclave-options.enabled - A Boolean that indicates whether the instance is enabled for Amazon Web Services Nitro Enclaves. hibernation-options.configured - A Boolean that indicates whether the instance is enabled for hibernation. A value of true means that the instance is enabled for hibernation. 
host-id - The ID of the Dedicated Host on which the instance is running, if applicable. hypervisor - The hypervisor type of the instance (ovm | xen). The value xen is used for both Xen and Nitro hypervisors. iam-instance-profile.arn - The instance profile associated with the instance. Specified as an ARN. iam-instance-profile.id - The instance profile associated with the instance. Specified as an ID. image-id - The ID of the image used to launch the instance. instance-id - The ID of the instance. instance-lifecycle - Indicates whether this is a Spot Instance, a Scheduled Instance, or a Capacity Block (spot | scheduled | capacity-block). instance-state-code - The state of the instance, as a 16-bit unsigned integer. The high byte is used for internal purposes and should be ignored. The low byte is set based on the state represented. The valid values are: 0 (pending), 16 (running), 32 (shutting-down), 48 (terminated), 64 (stopping), and 80 (stopped). instance-state-name - The state of the instance (pending | running | shutting-down | terminated | stopping | stopped). instance-type - The type of instance (for example, t2.micro). instance.group-id - The ID of the security group for the instance. instance.group-name - The name of the security group for the instance. ip-address - The public IPv4 address of the instance. ipv6-address - The IPv6 address of the instance. kernel-id - The kernel ID. key-name - The name of the key pair used when the instance was launched. launch-index - When launching multiple instances, this is the index for the instance in the launch group (for example, 0, 1, 2, and so on). launch-time - The time when the instance was launched, in the ISO 8601 format in the UTC time zone (YYYY-MM-DDThh:mm:ss.sssZ), for example, 2021-09-29T11:04:43.305Z. You can use a wildcard (*), for example, 2021-09-29T*, which matches an entire day. maintenance-options.auto-recovery - The current automatic recovery behavior of the instance (disabled | default). 
metadata-options.http-endpoint - The status of access to the HTTP metadata endpoint on your instance (enabled | disabled) metadata-options.http-protocol-ipv4 - Indicates whether the IPv4 endpoint is enabled (disabled | enabled). metadata-options.http-protocol-ipv6 - Indicates whether the IPv6 endpoint is enabled (disabled | enabled). metadata-options.http-put-response-hop-limit - The HTTP metadata request put response hop limit (integer, possible values 1 to 64) metadata-options.http-tokens - The metadata request authorization state (optional | required) metadata-options.instance-metadata-tags - The status of access to instance tags from the instance metadata (enabled | disabled) metadata-options.state - The state of the metadata option changes (pending | applied). monitoring-state - Indicates whether detailed monitoring is enabled (disabled | enabled). network-interface.addresses.association.allocation-id - The allocation ID. network-interface.addresses.association.association-id - The association ID. network-interface.addresses.association.carrier-ip - The carrier IP address. network-interface.addresses.association.customer-owned-ip - The customer-owned IP address. network-interface.addresses.association.ip-owner-id - The owner ID of the private IPv4 address associated with the network interface. network-interface.addresses.association.public-dns-name - The public DNS name. network-interface.addresses.association.public-ip - The ID of the association of an Elastic IP address (IPv4) with a network interface. network-interface.addresses.primary - Specifies whether the IPv4 address of the network interface is the primary private IPv4 address. network-interface.addresses.private-dns-name - The private DNS name. network-interface.addresses.private-ip-address - The private IPv4 address associated with the network interface. 
network-interface.association.allocation-id - The allocation ID returned when you allocated the Elastic IP address (IPv4) for your network interface. network-interface.association.association-id - The association ID returned when the network interface was associated with an IPv4 address. network-interface.association.carrier-ip - The customer-owned IP address. network-interface.association.customer-owned-ip - The customer-owned IP address. network-interface.association.ip-owner-id - The owner of the Elastic IP address (IPv4) associated with the network interface. network-interface.association.public-dns-name - The public DNS name. network-interface.association.public-ip - The address of the Elastic IP address (IPv4) bound to the network interface. network-interface.attachment.attach-time - The time that the network interface was attached to an instance. network-interface.attachment.attachment-id - The ID of the interface attachment. network-interface.attachment.delete-on-termination - Specifies whether the attachment is deleted when an instance is terminated. network-interface.attachment.device-index - The device index to which the network interface is attached. network-interface.attachment.instance-id - The ID of the instance to which the network interface is attached. network-interface.attachment.instance-owner-id - The owner ID of the instance to which the network interface is attached. network-interface.attachment.network-card-index - The index of the network card. network-interface.attachment.status - The status of the attachment (attaching | attached | detaching | detached). network-interface.availability-zone - The Availability Zone for the network interface. network-interface.deny-all-igw-traffic - A Boolean that indicates whether a network interface with an IPv6 address is unreachable from the public internet. network-interface.description - The description of the network interface. 
network-interface.group-id - The ID of a security group associated with the network interface. network-interface.group-name - The name of a security group associated with the network interface. network-interface.ipv4-prefixes.ipv4-prefix - The IPv4 prefixes that are assigned to the network interface. network-interface.ipv6-address - The IPv6 address associated with the network interface. network-interface.ipv6-addresses.ipv6-address - The IPv6 address associated with the network interface. network-interface.ipv6-addresses.is-primary-ipv6 - A Boolean that indicates whether this is the primary IPv6 address. network-interface.ipv6-native - A Boolean that indicates whether this is an IPv6 only network interface. network-interface.ipv6-prefixes.ipv6-prefix - The IPv6 prefix assigned to the network interface. network-interface.mac-address - The MAC address of the network interface. network-interface.network-interface-id - The ID of the network interface. network-interface.operator.managed - A Boolean that indicates whether the instance has a managed network interface. network-interface.operator.principal - The principal that manages the network interface. Only valid for instances with managed network interfaces, where managed is true. network-interface.outpost-arn - The ARN of the Outpost. network-interface.owner-id - The ID of the owner of the network interface. network-interface.private-dns-name - The private DNS name of the network interface. network-interface.private-ip-address - The private IPv4 address. network-interface.public-dns-name - The public DNS name. network-interface.requester-id - The requester ID for the network interface. network-interface.requester-managed - Indicates whether the network interface is being managed by Amazon Web Services. network-interface.status - The status of the network interface (available | in-use). network-interface.source-dest-check - Whether the network interface performs source/destination checking. 
A value of true means that checking is enabled, and false means that checking is disabled. The value must be false for the network interface to perform network address translation (NAT) in your VPC. network-interface.subnet-id - The ID of the subnet for the network interface. network-interface.tag-key - The key of a tag assigned to the network interface. network-interface.tag-value - The value of a tag assigned to the network interface. network-interface.vpc-id - The ID of the VPC for the network interface. network-performance-options.bandwidth-weighting - Where the performance boost is applied, if applicable. Valid values: default, vpc-1, ebs-1. operator.managed - A Boolean that indicates whether this is a managed instance. operator.principal - The principal that manages the instance. Only valid for managed instances, where managed is true. outpost-arn - The Amazon Resource Name (ARN) of the Outpost. owner-id - The Amazon Web Services account ID of the instance owner. placement-group-name - The name of the placement group for the instance. placement-partition-number - The partition in which the instance is located. platform - The platform. To list only Windows instances, use windows. platform-details - The platform (Linux/UNIX | Red Hat BYOL Linux | Red Hat Enterprise Linux | Red Hat Enterprise Linux with HA | Red Hat Enterprise Linux with SQL Server Standard and HA | Red Hat Enterprise Linux with SQL Server Enterprise and HA | Red Hat Enterprise Linux with SQL Server Standard | Red Hat Enterprise Linux with SQL Server Web | Red Hat Enterprise Linux with SQL Server Enterprise | SQL Server Enterprise | SQL Server Standard | SQL Server Web | SUSE Linux | Ubuntu Pro | Windows | Windows BYOL | Windows with SQL Server Enterprise | Windows with SQL Server Standard | Windows with SQL Server Web). private-dns-name - The private IPv4 DNS name of the instance. 
private-dns-name-options.enable-resource-name-dns-a-record - A Boolean that indicates whether to respond to DNS queries for instance hostnames with DNS A records. private-dns-name-options.enable-resource-name-dns-aaaa-record - A Boolean that indicates whether to respond to DNS queries for instance hostnames with DNS AAAA records. private-dns-name-options.hostname-type - The type of hostname (ip-name | resource-name). private-ip-address - The private IPv4 address of the instance. This can only be used to filter by the primary IP address of the network interface attached to the instance. To filter by additional IP addresses assigned to the network interface, use the filter network-interface.addresses.private-ip-address. product-code - The product code associated with the AMI used to launch the instance. product-code.type - The type of product code (devpay | marketplace). ramdisk-id - The RAM disk ID. reason - The reason for the current state of the instance (for example, shows "User Initiated [date]" when you stop or terminate the instance). Similar to the state-reason-code filter. requester-id - The ID of the entity that launched the instance on your behalf (for example, Amazon Web Services Management Console, Auto Scaling, and so on). reservation-id - The ID of the instance's reservation. A reservation ID is created any time you launch an instance. A reservation ID has a one-to-one relationship with an instance launch request, but can be associated with more than one instance if you launch multiple instances using the same launch request. For example, if you launch one instance, you get one reservation ID. If you launch ten instances using the same launch request, you also get one reservation ID. root-device-name - The device name of the root device volume (for example, /dev/sda1). root-device-type - The type of the root device volume (ebs | instance-store). source-dest-check - Indicates whether the instance performs source/destination checking. 
A value of true means that checking is enabled, and false means that checking is disabled. The value must be false for the instance to perform network address translation (NAT) in your VPC. spot-instance-request-id - The ID of the Spot Instance request. state-reason-code - The reason code for the state change. state-reason-message - A message that describes the state change. subnet-id - The ID of the subnet for the instance. tag: - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key Owner and the value TeamA, specify tag:Owner for the filter name and TeamA for the filter value. tag-key - The key of a tag assigned to the resource. Use this filter to find all resources that have a tag with a specific key, regardless of the tag value. tenancy - The tenancy of an instance (dedicated | default | host). tpm-support - Indicates if the instance is configured for NitroTPM support (v2.0). usage-operation - The usage operation value for the instance (RunInstances | RunInstances:00g0 | RunInstances:0010 | RunInstances:1010 | RunInstances:1014 | RunInstances:1110 | RunInstances:0014 | RunInstances:0210 | RunInstances:0110 | RunInstances:0100 | RunInstances:0004 | RunInstances:0200 | RunInstances:000g | RunInstances:0g00 | RunInstances:0002 | RunInstances:0800 | RunInstances:0102 | RunInstances:0006 | RunInstances:0202). usage-operation-update-time - The time that the usage operation was last updated, for example, 2022-09-15T17:15:20.000Z. virtualization-type - The virtualization type of the instance (paravirtual | hvm). vpc-id - The ID of the VPC that the instance is running in. /// - instanceIds: The instance IDs. Default: Describes all your instances. /// - maxResults: The maximum number of items to return for this request. To get the next page of items, make another request with the token returned in the output. 
/// - nextToken: The token returned from a previous paginated request. Pagination continues from the end of the items returned by the previous request. @@ -35512,7 +35556,7 @@ extension EC2 { /// /// - Parameters: /// - dryRun: Checks whether you have the required permissions for the operation, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. - /// - filters: The filters. affinity - The affinity setting for an instance running on a Dedicated Host (default | host). architecture - The instance architecture (i386 | x86_64 | arm64). availability-zone - The Availability Zone of the instance. block-device-mapping.attach-time - The attach time for an EBS volume mapped to the instance, for example, 2022-09-15T17:15:20.000Z. block-device-mapping.delete-on-termination - A Boolean that indicates whether the EBS volume is deleted on instance termination. block-device-mapping.device-name - The device name specified in the block device mapping (for example, /dev/sdh or xvdh). block-device-mapping.status - The status for the EBS volume (attaching | attached | detaching | detached). block-device-mapping.volume-id - The volume ID of the EBS volume. boot-mode - The boot mode that was specified by the AMI (legacy-bios | uefi | uefi-preferred). capacity-reservation-id - The ID of the Capacity Reservation into which the instance was launched. capacity-reservation-specification.capacity-reservation-preference - The instance's Capacity Reservation preference (open | none). capacity-reservation-specification.capacity-reservation-target.capacity-reservation-id - The ID of the targeted Capacity Reservation. capacity-reservation-specification.capacity-reservation-target.capacity-reservation-resource-group-arn - The ARN of the targeted Capacity Reservation group. client-token - The idempotency token you provided when you launched the instance. 
current-instance-boot-mode - The boot mode that is used to launch the instance at launch or start (legacy-bios | uefi). dns-name - The public DNS name of the instance. ebs-optimized - A Boolean that indicates whether the instance is optimized for Amazon EBS I/O. ena-support - A Boolean that indicates whether the instance is enabled for enhanced networking with ENA. enclave-options.enabled - A Boolean that indicates whether the instance is enabled for Amazon Web Services Nitro Enclaves. hibernation-options.configured - A Boolean that indicates whether the instance is enabled for hibernation. A value of true means that the instance is enabled for hibernation. host-id - The ID of the Dedicated Host on which the instance is running, if applicable. hypervisor - The hypervisor type of the instance (ovm | xen). The value xen is used for both Xen and Nitro hypervisors. iam-instance-profile.arn - The instance profile associated with the instance. Specified as an ARN. iam-instance-profile.id - The instance profile associated with the instance. Specified as an ID. image-id - The ID of the image used to launch the instance. instance-id - The ID of the instance. instance-lifecycle - Indicates whether this is a Spot Instance, a Scheduled Instance, or a Capacity Block (spot | scheduled | capacity-block). instance-state-code - The state of the instance, as a 16-bit unsigned integer. The high byte is used for internal purposes and should be ignored. The low byte is set based on the state represented. The valid values are: 0 (pending), 16 (running), 32 (shutting-down), 48 (terminated), 64 (stopping), and 80 (stopped). instance-state-name - The state of the instance (pending | running | shutting-down | terminated | stopping | stopped). instance-type - The type of instance (for example, t2.micro). instance.group-id - The ID of the security group for the instance. instance.group-name - The name of the security group for the instance. 
ip-address - The public IPv4 address of the instance. ipv6-address - The IPv6 address of the instance. kernel-id - The kernel ID. key-name - The name of the key pair used when the instance was launched. launch-index - When launching multiple instances, this is the index for the instance in the launch group (for example, 0, 1, 2, and so on). launch-time - The time when the instance was launched, in the ISO 8601 format in the UTC time zone (YYYY-MM-DDThh:mm:ss.sssZ), for example, 2021-09-29T11:04:43.305Z. You can use a wildcard (*), for example, 2021-09-29T*, which matches an entire day. maintenance-options.auto-recovery - The current automatic recovery behavior of the instance (disabled | default). metadata-options.http-endpoint - The status of access to the HTTP metadata endpoint on your instance (enabled | disabled) metadata-options.http-protocol-ipv4 - Indicates whether the IPv4 endpoint is enabled (disabled | enabled). metadata-options.http-protocol-ipv6 - Indicates whether the IPv6 endpoint is enabled (disabled | enabled). metadata-options.http-put-response-hop-limit - The HTTP metadata request put response hop limit (integer, possible values 1 to 64) metadata-options.http-tokens - The metadata request authorization state (optional | required) metadata-options.instance-metadata-tags - The status of access to instance tags from the instance metadata (enabled | disabled) metadata-options.state - The state of the metadata option changes (pending | applied). monitoring-state - Indicates whether detailed monitoring is enabled (disabled | enabled). network-interface.addresses.association.allocation-id - The allocation ID. network-interface.addresses.association.association-id - The association ID. network-interface.addresses.association.carrier-ip - The carrier IP address. network-interface.addresses.association.customer-owned-ip - The customer-owned IP address. 
network-interface.addresses.association.ip-owner-id - The owner ID of the private IPv4 address associated with the network interface. network-interface.addresses.association.public-dns-name - The public DNS name. network-interface.addresses.association.public-ip - The ID of the association of an Elastic IP address (IPv4) with a network interface. network-interface.addresses.primary - Specifies whether the IPv4 address of the network interface is the primary private IPv4 address. network-interface.addresses.private-dns-name - The private DNS name. network-interface.addresses.private-ip-address - The private IPv4 address associated with the network interface. network-interface.association.allocation-id - The allocation ID returned when you allocated the Elastic IP address (IPv4) for your network interface. network-interface.association.association-id - The association ID returned when the network interface was associated with an IPv4 address. network-interface.association.carrier-ip - The customer-owned IP address. network-interface.association.customer-owned-ip - The customer-owned IP address. network-interface.association.ip-owner-id - The owner of the Elastic IP address (IPv4) associated with the network interface. network-interface.association.public-dns-name - The public DNS name. network-interface.association.public-ip - The address of the Elastic IP address (IPv4) bound to the network interface. network-interface.attachment.attach-time - The time that the network interface was attached to an instance. network-interface.attachment.attachment-id - The ID of the interface attachment. network-interface.attachment.delete-on-termination - Specifies whether the attachment is deleted when an instance is terminated. network-interface.attachment.device-index - The device index to which the network interface is attached. network-interface.attachment.instance-id - The ID of the instance to which the network interface is attached. 
network-interface.attachment.instance-owner-id - The owner ID of the instance to which the network interface is attached. network-interface.attachment.network-card-index - The index of the network card. network-interface.attachment.status - The status of the attachment (attaching | attached | detaching | detached). network-interface.availability-zone - The Availability Zone for the network interface. network-interface.deny-all-igw-traffic - A Boolean that indicates whether a network interface with an IPv6 address is unreachable from the public internet. network-interface.description - The description of the network interface. network-interface.group-id - The ID of a security group associated with the network interface. network-interface.group-name - The name of a security group associated with the network interface. network-interface.ipv4-prefixes.ipv4-prefix - The IPv4 prefixes that are assigned to the network interface. network-interface.ipv6-address - The IPv6 address associated with the network interface. network-interface.ipv6-addresses.ipv6-address - The IPv6 address associated with the network interface. network-interface.ipv6-addresses.is-primary-ipv6 - A Boolean that indicates whether this is the primary IPv6 address. network-interface.ipv6-native - A Boolean that indicates whether this is an IPv6 only network interface. network-interface.ipv6-prefixes.ipv6-prefix - The IPv6 prefix assigned to the network interface. network-interface.mac-address - The MAC address of the network interface. network-interface.network-interface-id - The ID of the network interface. network-interface.operator.managed - A Boolean that indicates whether the instance has a managed network interface. network-interface.operator.principal - The principal that manages the network interface. Only valid for instances with managed network interfaces, where managed is true. network-interface.outpost-arn - The ARN of the Outpost. 
network-interface.owner-id - The ID of the owner of the network interface. network-interface.private-dns-name - The private DNS name of the network interface. network-interface.private-ip-address - The private IPv4 address. network-interface.public-dns-name - The public DNS name. network-interface.requester-id - The requester ID for the network interface. network-interface.requester-managed - Indicates whether the network interface is being managed by Amazon Web Services. network-interface.status - The status of the network interface (available | in-use). network-interface.source-dest-check - Whether the network interface performs source/destination checking. A value of true means that checking is enabled, and false means that checking is disabled. The value must be false for the network interface to perform network address translation (NAT) in your VPC. network-interface.subnet-id - The ID of the subnet for the network interface. network-interface.tag-key - The key of a tag assigned to the network interface. network-interface.tag-value - The value of a tag assigned to the network interface. network-interface.vpc-id - The ID of the VPC for the network interface. operator.managed - A Boolean that indicates whether this is a managed instance. operator.principal - The principal that manages the instance. Only valid for managed instances, where managed is true. outpost-arn - The Amazon Resource Name (ARN) of the Outpost. owner-id - The Amazon Web Services account ID of the instance owner. placement-group-name - The name of the placement group for the instance. placement-partition-number - The partition in which the instance is located. platform - The platform. To list only Windows instances, use windows. 
platform-details - The platform (Linux/UNIX | Red Hat BYOL Linux | Red Hat Enterprise Linux | Red Hat Enterprise Linux with HA | Red Hat Enterprise Linux with SQL Server Standard and HA | Red Hat Enterprise Linux with SQL Server Enterprise and HA | Red Hat Enterprise Linux with SQL Server Standard | Red Hat Enterprise Linux with SQL Server Web | Red Hat Enterprise Linux with SQL Server Enterprise | SQL Server Enterprise | SQL Server Standard | SQL Server Web | SUSE Linux | Ubuntu Pro | Windows | Windows BYOL | Windows with SQL Server Enterprise | Windows with SQL Server Standard | Windows with SQL Server Web). private-dns-name - The private IPv4 DNS name of the instance. private-dns-name-options.enable-resource-name-dns-a-record - A Boolean that indicates whether to respond to DNS queries for instance hostnames with DNS A records. private-dns-name-options.enable-resource-name-dns-aaaa-record - A Boolean that indicates whether to respond to DNS queries for instance hostnames with DNS AAAA records. private-dns-name-options.hostname-type - The type of hostname (ip-name | resource-name). private-ip-address - The private IPv4 address of the instance. This can only be used to filter by the primary IP address of the network interface attached to the instance. To filter by additional IP addresses assigned to the network interface, use the filter network-interface.addresses.private-ip-address. product-code - The product code associated with the AMI used to launch the instance. product-code.type - The type of product code (devpay | marketplace). ramdisk-id - The RAM disk ID. reason - The reason for the current state of the instance (for example, shows "User Initiated [date]" when you stop or terminate the instance). Similar to the state-reason-code filter. requester-id - The ID of the entity that launched the instance on your behalf (for example, Amazon Web Services Management Console, Auto Scaling, and so on). reservation-id - The ID of the instance's reservation. 
A reservation ID is created any time you launch an instance. A reservation ID has a one-to-one relationship with an instance launch request, but can be associated with more than one instance if you launch multiple instances using the same launch request. For example, if you launch one instance, you get one reservation ID. If you launch ten instances using the same launch request, you also get one reservation ID. root-device-name - The device name of the root device volume (for example, /dev/sda1). root-device-type - The type of the root device volume (ebs | instance-store). source-dest-check - Indicates whether the instance performs source/destination checking. A value of true means that checking is enabled, and false means that checking is disabled. The value must be false for the instance to perform network address translation (NAT) in your VPC. spot-instance-request-id - The ID of the Spot Instance request. state-reason-code - The reason code for the state change. state-reason-message - A message that describes the state change. subnet-id - The ID of the subnet for the instance. tag: - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key Owner and the value TeamA, specify tag:Owner for the filter name and TeamA for the filter value. tag-key - The key of a tag assigned to the resource. Use this filter to find all resources that have a tag with a specific key, regardless of the tag value. tenancy - The tenancy of an instance (dedicated | default | host). tpm-support - Indicates if the instance is configured for NitroTPM support (v2.0). 
usage-operation - The usage operation value for the instance (RunInstances | RunInstances:00g0 | RunInstances:0010 | RunInstances:1010 | RunInstances:1014 | RunInstances:1110 | RunInstances:0014 | RunInstances:0210 | RunInstances:0110 | RunInstances:0100 | RunInstances:0004 | RunInstances:0200 | RunInstances:000g | RunInstances:0g00 | RunInstances:0002 | RunInstances:0800 | RunInstances:0102 | RunInstances:0006 | RunInstances:0202). usage-operation-update-time - The time that the usage operation was last updated, for example, 2022-09-15T17:15:20.000Z. virtualization-type - The virtualization type of the instance (paravirtual | hvm). vpc-id - The ID of the VPC that the instance is running in. + /// - filters: The filters. affinity - The affinity setting for an instance running on a Dedicated Host (default | host). architecture - The instance architecture (i386 | x86_64 | arm64). availability-zone - The Availability Zone of the instance. block-device-mapping.attach-time - The attach time for an EBS volume mapped to the instance, for example, 2022-09-15T17:15:20.000Z. block-device-mapping.delete-on-termination - A Boolean that indicates whether the EBS volume is deleted on instance termination. block-device-mapping.device-name - The device name specified in the block device mapping (for example, /dev/sdh or xvdh). block-device-mapping.status - The status for the EBS volume (attaching | attached | detaching | detached). block-device-mapping.volume-id - The volume ID of the EBS volume. boot-mode - The boot mode that was specified by the AMI (legacy-bios | uefi | uefi-preferred). capacity-reservation-id - The ID of the Capacity Reservation into which the instance was launched. capacity-reservation-specification.capacity-reservation-preference - The instance's Capacity Reservation preference (open | none). capacity-reservation-specification.capacity-reservation-target.capacity-reservation-id - The ID of the targeted Capacity Reservation. 
capacity-reservation-specification.capacity-reservation-target.capacity-reservation-resource-group-arn - The ARN of the targeted Capacity Reservation group. client-token - The idempotency token you provided when you launched the instance. current-instance-boot-mode - The boot mode that is used to launch the instance at launch or start (legacy-bios | uefi). dns-name - The public DNS name of the instance. ebs-optimized - A Boolean that indicates whether the instance is optimized for Amazon EBS I/O. ena-support - A Boolean that indicates whether the instance is enabled for enhanced networking with ENA. enclave-options.enabled - A Boolean that indicates whether the instance is enabled for Amazon Web Services Nitro Enclaves. hibernation-options.configured - A Boolean that indicates whether the instance is enabled for hibernation. A value of true means that the instance is enabled for hibernation. host-id - The ID of the Dedicated Host on which the instance is running, if applicable. hypervisor - The hypervisor type of the instance (ovm | xen). The value xen is used for both Xen and Nitro hypervisors. iam-instance-profile.arn - The instance profile associated with the instance. Specified as an ARN. iam-instance-profile.id - The instance profile associated with the instance. Specified as an ID. image-id - The ID of the image used to launch the instance. instance-id - The ID of the instance. instance-lifecycle - Indicates whether this is a Spot Instance, a Scheduled Instance, or a Capacity Block (spot | scheduled | capacity-block). instance-state-code - The state of the instance, as a 16-bit unsigned integer. The high byte is used for internal purposes and should be ignored. The low byte is set based on the state represented. The valid values are: 0 (pending), 16 (running), 32 (shutting-down), 48 (terminated), 64 (stopping), and 80 (stopped). instance-state-name - The state of the instance (pending | running | shutting-down | terminated | stopping | stopped). 
instance-type - The type of instance (for example, t2.micro). instance.group-id - The ID of the security group for the instance. instance.group-name - The name of the security group for the instance. ip-address - The public IPv4 address of the instance. ipv6-address - The IPv6 address of the instance. kernel-id - The kernel ID. key-name - The name of the key pair used when the instance was launched. launch-index - When launching multiple instances, this is the index for the instance in the launch group (for example, 0, 1, 2, and so on). launch-time - The time when the instance was launched, in the ISO 8601 format in the UTC time zone (YYYY-MM-DDThh:mm:ss.sssZ), for example, 2021-09-29T11:04:43.305Z. You can use a wildcard (*), for example, 2021-09-29T*, which matches an entire day. maintenance-options.auto-recovery - The current automatic recovery behavior of the instance (disabled | default). metadata-options.http-endpoint - The status of access to the HTTP metadata endpoint on your instance (enabled | disabled) metadata-options.http-protocol-ipv4 - Indicates whether the IPv4 endpoint is enabled (disabled | enabled). metadata-options.http-protocol-ipv6 - Indicates whether the IPv6 endpoint is enabled (disabled | enabled). metadata-options.http-put-response-hop-limit - The HTTP metadata request put response hop limit (integer, possible values 1 to 64) metadata-options.http-tokens - The metadata request authorization state (optional | required) metadata-options.instance-metadata-tags - The status of access to instance tags from the instance metadata (enabled | disabled) metadata-options.state - The state of the metadata option changes (pending | applied). monitoring-state - Indicates whether detailed monitoring is enabled (disabled | enabled). network-interface.addresses.association.allocation-id - The allocation ID. network-interface.addresses.association.association-id - The association ID. 
network-interface.addresses.association.carrier-ip - The carrier IP address. network-interface.addresses.association.customer-owned-ip - The customer-owned IP address. network-interface.addresses.association.ip-owner-id - The owner ID of the private IPv4 address associated with the network interface. network-interface.addresses.association.public-dns-name - The public DNS name. network-interface.addresses.association.public-ip - The ID of the association of an Elastic IP address (IPv4) with a network interface. network-interface.addresses.primary - Specifies whether the IPv4 address of the network interface is the primary private IPv4 address. network-interface.addresses.private-dns-name - The private DNS name. network-interface.addresses.private-ip-address - The private IPv4 address associated with the network interface. network-interface.association.allocation-id - The allocation ID returned when you allocated the Elastic IP address (IPv4) for your network interface. network-interface.association.association-id - The association ID returned when the network interface was associated with an IPv4 address. network-interface.association.carrier-ip - The customer-owned IP address. network-interface.association.customer-owned-ip - The customer-owned IP address. network-interface.association.ip-owner-id - The owner of the Elastic IP address (IPv4) associated with the network interface. network-interface.association.public-dns-name - The public DNS name. network-interface.association.public-ip - The address of the Elastic IP address (IPv4) bound to the network interface. network-interface.attachment.attach-time - The time that the network interface was attached to an instance. network-interface.attachment.attachment-id - The ID of the interface attachment. network-interface.attachment.delete-on-termination - Specifies whether the attachment is deleted when an instance is terminated. 
network-interface.attachment.device-index - The device index to which the network interface is attached. network-interface.attachment.instance-id - The ID of the instance to which the network interface is attached. network-interface.attachment.instance-owner-id - The owner ID of the instance to which the network interface is attached. network-interface.attachment.network-card-index - The index of the network card. network-interface.attachment.status - The status of the attachment (attaching | attached | detaching | detached). network-interface.availability-zone - The Availability Zone for the network interface. network-interface.deny-all-igw-traffic - A Boolean that indicates whether a network interface with an IPv6 address is unreachable from the public internet. network-interface.description - The description of the network interface. network-interface.group-id - The ID of a security group associated with the network interface. network-interface.group-name - The name of a security group associated with the network interface. network-interface.ipv4-prefixes.ipv4-prefix - The IPv4 prefixes that are assigned to the network interface. network-interface.ipv6-address - The IPv6 address associated with the network interface. network-interface.ipv6-addresses.ipv6-address - The IPv6 address associated with the network interface. network-interface.ipv6-addresses.is-primary-ipv6 - A Boolean that indicates whether this is the primary IPv6 address. network-interface.ipv6-native - A Boolean that indicates whether this is an IPv6 only network interface. network-interface.ipv6-prefixes.ipv6-prefix - The IPv6 prefix assigned to the network interface. network-interface.mac-address - The MAC address of the network interface. network-interface.network-interface-id - The ID of the network interface. network-interface.operator.managed - A Boolean that indicates whether the instance has a managed network interface. 
network-interface.operator.principal - The principal that manages the network interface. Only valid for instances with managed network interfaces, where managed is true. network-interface.outpost-arn - The ARN of the Outpost. network-interface.owner-id - The ID of the owner of the network interface. network-interface.private-dns-name - The private DNS name of the network interface. network-interface.private-ip-address - The private IPv4 address. network-interface.public-dns-name - The public DNS name. network-interface.requester-id - The requester ID for the network interface. network-interface.requester-managed - Indicates whether the network interface is being managed by Amazon Web Services. network-interface.status - The status of the network interface (available | in-use). network-interface.source-dest-check - Whether the network interface performs source/destination checking. A value of true means that checking is enabled, and false means that checking is disabled. The value must be false for the network interface to perform network address translation (NAT) in your VPC. network-interface.subnet-id - The ID of the subnet for the network interface. network-interface.tag-key - The key of a tag assigned to the network interface. network-interface.tag-value - The value of a tag assigned to the network interface. network-interface.vpc-id - The ID of the VPC for the network interface. network-performance-options.bandwidth-weighting - Where the performance boost is applied, if applicable. Valid values: default, vpc-1, ebs-1. operator.managed - A Boolean that indicates whether this is a managed instance. operator.principal - The principal that manages the instance. Only valid for managed instances, where managed is true. outpost-arn - The Amazon Resource Name (ARN) of the Outpost. owner-id - The Amazon Web Services account ID of the instance owner. placement-group-name - The name of the placement group for the instance. 
placement-partition-number - The partition in which the instance is located. platform - The platform. To list only Windows instances, use windows. platform-details - The platform (Linux/UNIX | Red Hat BYOL Linux | Red Hat Enterprise Linux | Red Hat Enterprise Linux with HA | Red Hat Enterprise Linux with SQL Server Standard and HA | Red Hat Enterprise Linux with SQL Server Enterprise and HA | Red Hat Enterprise Linux with SQL Server Standard | Red Hat Enterprise Linux with SQL Server Web | Red Hat Enterprise Linux with SQL Server Enterprise | SQL Server Enterprise | SQL Server Standard | SQL Server Web | SUSE Linux | Ubuntu Pro | Windows | Windows BYOL | Windows with SQL Server Enterprise | Windows with SQL Server Standard | Windows with SQL Server Web). private-dns-name - The private IPv4 DNS name of the instance. private-dns-name-options.enable-resource-name-dns-a-record - A Boolean that indicates whether to respond to DNS queries for instance hostnames with DNS A records. private-dns-name-options.enable-resource-name-dns-aaaa-record - A Boolean that indicates whether to respond to DNS queries for instance hostnames with DNS AAAA records. private-dns-name-options.hostname-type - The type of hostname (ip-name | resource-name). private-ip-address - The private IPv4 address of the instance. This can only be used to filter by the primary IP address of the network interface attached to the instance. To filter by additional IP addresses assigned to the network interface, use the filter network-interface.addresses.private-ip-address. product-code - The product code associated with the AMI used to launch the instance. product-code.type - The type of product code (devpay | marketplace). ramdisk-id - The RAM disk ID. reason - The reason for the current state of the instance (for example, shows "User Initiated [date]" when you stop or terminate the instance). Similar to the state-reason-code filter. 
requester-id - The ID of the entity that launched the instance on your behalf (for example, Amazon Web Services Management Console, Auto Scaling, and so on). reservation-id - The ID of the instance's reservation. A reservation ID is created any time you launch an instance. A reservation ID has a one-to-one relationship with an instance launch request, but can be associated with more than one instance if you launch multiple instances using the same launch request. For example, if you launch one instance, you get one reservation ID. If you launch ten instances using the same launch request, you also get one reservation ID. root-device-name - The device name of the root device volume (for example, /dev/sda1). root-device-type - The type of the root device volume (ebs | instance-store). source-dest-check - Indicates whether the instance performs source/destination checking. A value of true means that checking is enabled, and false means that checking is disabled. The value must be false for the instance to perform network address translation (NAT) in your VPC. spot-instance-request-id - The ID of the Spot Instance request. state-reason-code - The reason code for the state change. state-reason-message - A message that describes the state change. subnet-id - The ID of the subnet for the instance. tag: - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key Owner and the value TeamA, specify tag:Owner for the filter name and TeamA for the filter value. tag-key - The key of a tag assigned to the resource. Use this filter to find all resources that have a tag with a specific key, regardless of the tag value. tenancy - The tenancy of an instance (dedicated | default | host). tpm-support - Indicates if the instance is configured for NitroTPM support (v2.0). 
usage-operation - The usage operation value for the instance (RunInstances | RunInstances:00g0 | RunInstances:0010 | RunInstances:1010 | RunInstances:1014 | RunInstances:1110 | RunInstances:0014 | RunInstances:0210 | RunInstances:0110 | RunInstances:0100 | RunInstances:0004 | RunInstances:0200 | RunInstances:000g | RunInstances:0g00 | RunInstances:0002 | RunInstances:0800 | RunInstances:0102 | RunInstances:0006 | RunInstances:0202). usage-operation-update-time - The time that the usage operation was last updated, for example, 2022-09-15T17:15:20.000Z. virtualization-type - The virtualization type of the instance (paravirtual | hvm). vpc-id - The ID of the VPC that the instance is running in. /// - instanceIds: The instance IDs. Default: Describes all your instances. /// - maxResults: The maximum number of items to return for this request. To get the next page of items, make another request with the token returned in the output. /// - nextToken: The token returned from a previous paginated request. Pagination continues from the end of the items returned by the previous request. @@ -35562,7 +35606,7 @@ extension EC2 { /// /// - Parameters: /// - dryRun: Checks whether you have the required permissions for the operation, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. - /// - filters: The filters. affinity - The affinity setting for an instance running on a Dedicated Host (default | host). architecture - The instance architecture (i386 | x86_64 | arm64). availability-zone - The Availability Zone of the instance. block-device-mapping.attach-time - The attach time for an EBS volume mapped to the instance, for example, 2022-09-15T17:15:20.000Z. block-device-mapping.delete-on-termination - A Boolean that indicates whether the EBS volume is deleted on instance termination. 
block-device-mapping.device-name - The device name specified in the block device mapping (for example, /dev/sdh or xvdh). block-device-mapping.status - The status for the EBS volume (attaching | attached | detaching | detached). block-device-mapping.volume-id - The volume ID of the EBS volume. boot-mode - The boot mode that was specified by the AMI (legacy-bios | uefi | uefi-preferred). capacity-reservation-id - The ID of the Capacity Reservation into which the instance was launched. capacity-reservation-specification.capacity-reservation-preference - The instance's Capacity Reservation preference (open | none). capacity-reservation-specification.capacity-reservation-target.capacity-reservation-id - The ID of the targeted Capacity Reservation. capacity-reservation-specification.capacity-reservation-target.capacity-reservation-resource-group-arn - The ARN of the targeted Capacity Reservation group. client-token - The idempotency token you provided when you launched the instance. current-instance-boot-mode - The boot mode that is used to launch the instance at launch or start (legacy-bios | uefi). dns-name - The public DNS name of the instance. ebs-optimized - A Boolean that indicates whether the instance is optimized for Amazon EBS I/O. ena-support - A Boolean that indicates whether the instance is enabled for enhanced networking with ENA. enclave-options.enabled - A Boolean that indicates whether the instance is enabled for Amazon Web Services Nitro Enclaves. hibernation-options.configured - A Boolean that indicates whether the instance is enabled for hibernation. A value of true means that the instance is enabled for hibernation. host-id - The ID of the Dedicated Host on which the instance is running, if applicable. hypervisor - The hypervisor type of the instance (ovm | xen). The value xen is used for both Xen and Nitro hypervisors. iam-instance-profile.arn - The instance profile associated with the instance. Specified as an ARN. 
iam-instance-profile.id - The instance profile associated with the instance. Specified as an ID. image-id - The ID of the image used to launch the instance. instance-id - The ID of the instance. instance-lifecycle - Indicates whether this is a Spot Instance, a Scheduled Instance, or a Capacity Block (spot | scheduled | capacity-block). instance-state-code - The state of the instance, as a 16-bit unsigned integer. The high byte is used for internal purposes and should be ignored. The low byte is set based on the state represented. The valid values are: 0 (pending), 16 (running), 32 (shutting-down), 48 (terminated), 64 (stopping), and 80 (stopped). instance-state-name - The state of the instance (pending | running | shutting-down | terminated | stopping | stopped). instance-type - The type of instance (for example, t2.micro). instance.group-id - The ID of the security group for the instance. instance.group-name - The name of the security group for the instance. ip-address - The public IPv4 address of the instance. ipv6-address - The IPv6 address of the instance. kernel-id - The kernel ID. key-name - The name of the key pair used when the instance was launched. launch-index - When launching multiple instances, this is the index for the instance in the launch group (for example, 0, 1, 2, and so on). launch-time - The time when the instance was launched, in the ISO 8601 format in the UTC time zone (YYYY-MM-DDThh:mm:ss.sssZ), for example, 2021-09-29T11:04:43.305Z. You can use a wildcard (*), for example, 2021-09-29T*, which matches an entire day. maintenance-options.auto-recovery - The current automatic recovery behavior of the instance (disabled | default). metadata-options.http-endpoint - The status of access to the HTTP metadata endpoint on your instance (enabled | disabled) metadata-options.http-protocol-ipv4 - Indicates whether the IPv4 endpoint is enabled (disabled | enabled). 
metadata-options.http-protocol-ipv6 - Indicates whether the IPv6 endpoint is enabled (disabled | enabled). metadata-options.http-put-response-hop-limit - The HTTP metadata request put response hop limit (integer, possible values 1 to 64) metadata-options.http-tokens - The metadata request authorization state (optional | required) metadata-options.instance-metadata-tags - The status of access to instance tags from the instance metadata (enabled | disabled) metadata-options.state - The state of the metadata option changes (pending | applied). monitoring-state - Indicates whether detailed monitoring is enabled (disabled | enabled). network-interface.addresses.association.allocation-id - The allocation ID. network-interface.addresses.association.association-id - The association ID. network-interface.addresses.association.carrier-ip - The carrier IP address. network-interface.addresses.association.customer-owned-ip - The customer-owned IP address. network-interface.addresses.association.ip-owner-id - The owner ID of the private IPv4 address associated with the network interface. network-interface.addresses.association.public-dns-name - The public DNS name. network-interface.addresses.association.public-ip - The ID of the association of an Elastic IP address (IPv4) with a network interface. network-interface.addresses.primary - Specifies whether the IPv4 address of the network interface is the primary private IPv4 address. network-interface.addresses.private-dns-name - The private DNS name. network-interface.addresses.private-ip-address - The private IPv4 address associated with the network interface. network-interface.association.allocation-id - The allocation ID returned when you allocated the Elastic IP address (IPv4) for your network interface. network-interface.association.association-id - The association ID returned when the network interface was associated with an IPv4 address. network-interface.association.carrier-ip - The customer-owned IP address. 
network-interface.association.customer-owned-ip - The customer-owned IP address. network-interface.association.ip-owner-id - The owner of the Elastic IP address (IPv4) associated with the network interface. network-interface.association.public-dns-name - The public DNS name. network-interface.association.public-ip - The address of the Elastic IP address (IPv4) bound to the network interface. network-interface.attachment.attach-time - The time that the network interface was attached to an instance. network-interface.attachment.attachment-id - The ID of the interface attachment. network-interface.attachment.delete-on-termination - Specifies whether the attachment is deleted when an instance is terminated. network-interface.attachment.device-index - The device index to which the network interface is attached. network-interface.attachment.instance-id - The ID of the instance to which the network interface is attached. network-interface.attachment.instance-owner-id - The owner ID of the instance to which the network interface is attached. network-interface.attachment.network-card-index - The index of the network card. network-interface.attachment.status - The status of the attachment (attaching | attached | detaching | detached). network-interface.availability-zone - The Availability Zone for the network interface. network-interface.deny-all-igw-traffic - A Boolean that indicates whether a network interface with an IPv6 address is unreachable from the public internet. network-interface.description - The description of the network interface. network-interface.group-id - The ID of a security group associated with the network interface. network-interface.group-name - The name of a security group associated with the network interface. network-interface.ipv4-prefixes.ipv4-prefix - The IPv4 prefixes that are assigned to the network interface. network-interface.ipv6-address - The IPv6 address associated with the network interface. 
network-interface.ipv6-addresses.ipv6-address - The IPv6 address associated with the network interface. network-interface.ipv6-addresses.is-primary-ipv6 - A Boolean that indicates whether this is the primary IPv6 address. network-interface.ipv6-native - A Boolean that indicates whether this is an IPv6 only network interface. network-interface.ipv6-prefixes.ipv6-prefix - The IPv6 prefix assigned to the network interface. network-interface.mac-address - The MAC address of the network interface. network-interface.network-interface-id - The ID of the network interface. network-interface.operator.managed - A Boolean that indicates whether the instance has a managed network interface. network-interface.operator.principal - The principal that manages the network interface. Only valid for instances with managed network interfaces, where managed is true. network-interface.outpost-arn - The ARN of the Outpost. network-interface.owner-id - The ID of the owner of the network interface. network-interface.private-dns-name - The private DNS name of the network interface. network-interface.private-ip-address - The private IPv4 address. network-interface.public-dns-name - The public DNS name. network-interface.requester-id - The requester ID for the network interface. network-interface.requester-managed - Indicates whether the network interface is being managed by Amazon Web Services. network-interface.status - The status of the network interface (available) | in-use). network-interface.source-dest-check - Whether the network interface performs source/destination checking. A value of true means that checking is enabled, and false means that checking is disabled. The value must be false for the network interface to perform network address translation (NAT) in your VPC. network-interface.subnet-id - The ID of the subnet for the network interface. network-interface.tag-key - The key of a tag assigned to the network interface. 
network-interface.tag-value - The value of a tag assigned to the network interface. network-interface.vpc-id - The ID of the VPC for the network interface. operator.managed - A Boolean that indicates whether this is a managed instance. operator.principal - The principal that manages the instance. Only valid for managed instances, where managed is true. outpost-arn - The Amazon Resource Name (ARN) of the Outpost. owner-id - The Amazon Web Services account ID of the instance owner. placement-group-name - The name of the placement group for the instance. placement-partition-number - The partition in which the instance is located. platform - The platform. To list only Windows instances, use windows. platform-details - The platform (Linux/UNIX | Red Hat BYOL Linux | Red Hat Enterprise Linux | Red Hat Enterprise Linux with HA | Red Hat Enterprise Linux with SQL Server Standard and HA | Red Hat Enterprise Linux with SQL Server Enterprise and HA | Red Hat Enterprise Linux with SQL Server Standard | Red Hat Enterprise Linux with SQL Server Web | Red Hat Enterprise Linux with SQL Server Enterprise | SQL Server Enterprise | SQL Server Standard | SQL Server Web | SUSE Linux | Ubuntu Pro | Windows | Windows BYOL | Windows with SQL Server Enterprise | Windows with SQL Server Standard | Windows with SQL Server Web). private-dns-name - The private IPv4 DNS name of the instance. private-dns-name-options.enable-resource-name-dns-a-record - A Boolean that indicates whether to respond to DNS queries for instance hostnames with DNS A records. private-dns-name-options.enable-resource-name-dns-aaaa-record - A Boolean that indicates whether to respond to DNS queries for instance hostnames with DNS AAAA records. private-dns-name-options.hostname-type - The type of hostname (ip-name | resource-name). private-ip-address - The private IPv4 address of the instance. This can only be used to filter by the primary IP address of the network interface attached to the instance. 
To filter by additional IP addresses assigned to the network interface, use the filter network-interface.addresses.private-ip-address. product-code - The product code associated with the AMI used to launch the instance. product-code.type - The type of product code (devpay | marketplace). ramdisk-id - The RAM disk ID. reason - The reason for the current state of the instance (for example, shows "User Initiated [date]" when you stop or terminate the instance). Similar to the state-reason-code filter. requester-id - The ID of the entity that launched the instance on your behalf (for example, Amazon Web Services Management Console, Auto Scaling, and so on). reservation-id - The ID of the instance's reservation. A reservation ID is created any time you launch an instance. A reservation ID has a one-to-one relationship with an instance launch request, but can be associated with more than one instance if you launch multiple instances using the same launch request. For example, if you launch one instance, you get one reservation ID. If you launch ten instances using the same launch request, you also get one reservation ID. root-device-name - The device name of the root device volume (for example, /dev/sda1). root-device-type - The type of the root device volume (ebs | instance-store). source-dest-check - Indicates whether the instance performs source/destination checking. A value of true means that checking is enabled, and false means that checking is disabled. The value must be false for the instance to perform network address translation (NAT) in your VPC. spot-instance-request-id - The ID of the Spot Instance request. state-reason-code - The reason code for the state change. state-reason-message - A message that describes the state change. subnet-id - The ID of the subnet for the instance. tag: - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. 
For example, to find all resources that have a tag with the key Owner and the value TeamA, specify tag:Owner for the filter name and TeamA for the filter value. tag-key - The key of a tag assigned to the resource. Use this filter to find all resources that have a tag with a specific key, regardless of the tag value. tenancy - The tenancy of an instance (dedicated | default | host). tpm-support - Indicates if the instance is configured for NitroTPM support (v2.0). usage-operation - The usage operation value for the instance (RunInstances | RunInstances:00g0 | RunInstances:0010 | RunInstances:1010 | RunInstances:1014 | RunInstances:1110 | RunInstances:0014 | RunInstances:0210 | RunInstances:0110 | RunInstances:0100 | RunInstances:0004 | RunInstances:0200 | RunInstances:000g | RunInstances:0g00 | RunInstances:0002 | RunInstances:0800 | RunInstances:0102 | RunInstances:0006 | RunInstances:0202). usage-operation-update-time - The time that the usage operation was last updated, for example, 2022-09-15T17:15:20.000Z. virtualization-type - The virtualization type of the instance (paravirtual | hvm). vpc-id - The ID of the VPC that the instance is running in. + /// - filters: The filters. affinity - The affinity setting for an instance running on a Dedicated Host (default | host). architecture - The instance architecture (i386 | x86_64 | arm64). availability-zone - The Availability Zone of the instance. block-device-mapping.attach-time - The attach time for an EBS volume mapped to the instance, for example, 2022-09-15T17:15:20.000Z. block-device-mapping.delete-on-termination - A Boolean that indicates whether the EBS volume is deleted on instance termination. block-device-mapping.device-name - The device name specified in the block device mapping (for example, /dev/sdh or xvdh). block-device-mapping.status - The status for the EBS volume (attaching | attached | detaching | detached). block-device-mapping.volume-id - The volume ID of the EBS volume. 
boot-mode - The boot mode that was specified by the AMI (legacy-bios | uefi | uefi-preferred). capacity-reservation-id - The ID of the Capacity Reservation into which the instance was launched. capacity-reservation-specification.capacity-reservation-preference - The instance's Capacity Reservation preference (open | none). capacity-reservation-specification.capacity-reservation-target.capacity-reservation-id - The ID of the targeted Capacity Reservation. capacity-reservation-specification.capacity-reservation-target.capacity-reservation-resource-group-arn - The ARN of the targeted Capacity Reservation group. client-token - The idempotency token you provided when you launched the instance. current-instance-boot-mode - The boot mode that is used to launch the instance at launch or start (legacy-bios | uefi). dns-name - The public DNS name of the instance. ebs-optimized - A Boolean that indicates whether the instance is optimized for Amazon EBS I/O. ena-support - A Boolean that indicates whether the instance is enabled for enhanced networking with ENA. enclave-options.enabled - A Boolean that indicates whether the instance is enabled for Amazon Web Services Nitro Enclaves. hibernation-options.configured - A Boolean that indicates whether the instance is enabled for hibernation. A value of true means that the instance is enabled for hibernation. host-id - The ID of the Dedicated Host on which the instance is running, if applicable. hypervisor - The hypervisor type of the instance (ovm | xen). The value xen is used for both Xen and Nitro hypervisors. iam-instance-profile.arn - The instance profile associated with the instance. Specified as an ARN. iam-instance-profile.id - The instance profile associated with the instance. Specified as an ID. image-id - The ID of the image used to launch the instance. instance-id - The ID of the instance. 
instance-lifecycle - Indicates whether this is a Spot Instance, a Scheduled Instance, or a Capacity Block (spot | scheduled | capacity-block). instance-state-code - The state of the instance, as a 16-bit unsigned integer. The high byte is used for internal purposes and should be ignored. The low byte is set based on the state represented. The valid values are: 0 (pending), 16 (running), 32 (shutting-down), 48 (terminated), 64 (stopping), and 80 (stopped). instance-state-name - The state of the instance (pending | running | shutting-down | terminated | stopping | stopped). instance-type - The type of instance (for example, t2.micro). instance.group-id - The ID of the security group for the instance. instance.group-name - The name of the security group for the instance. ip-address - The public IPv4 address of the instance. ipv6-address - The IPv6 address of the instance. kernel-id - The kernel ID. key-name - The name of the key pair used when the instance was launched. launch-index - When launching multiple instances, this is the index for the instance in the launch group (for example, 0, 1, 2, and so on). launch-time - The time when the instance was launched, in the ISO 8601 format in the UTC time zone (YYYY-MM-DDThh:mm:ss.sssZ), for example, 2021-09-29T11:04:43.305Z. You can use a wildcard (*), for example, 2021-09-29T*, which matches an entire day. maintenance-options.auto-recovery - The current automatic recovery behavior of the instance (disabled | default). metadata-options.http-endpoint - The status of access to the HTTP metadata endpoint on your instance (enabled | disabled) metadata-options.http-protocol-ipv4 - Indicates whether the IPv4 endpoint is enabled (disabled | enabled). metadata-options.http-protocol-ipv6 - Indicates whether the IPv6 endpoint is enabled (disabled | enabled). 
metadata-options.http-put-response-hop-limit - The HTTP metadata request put response hop limit (integer, possible values 1 to 64) metadata-options.http-tokens - The metadata request authorization state (optional | required) metadata-options.instance-metadata-tags - The status of access to instance tags from the instance metadata (enabled | disabled) metadata-options.state - The state of the metadata option changes (pending | applied). monitoring-state - Indicates whether detailed monitoring is enabled (disabled | enabled). network-interface.addresses.association.allocation-id - The allocation ID. network-interface.addresses.association.association-id - The association ID. network-interface.addresses.association.carrier-ip - The carrier IP address. network-interface.addresses.association.customer-owned-ip - The customer-owned IP address. network-interface.addresses.association.ip-owner-id - The owner ID of the private IPv4 address associated with the network interface. network-interface.addresses.association.public-dns-name - The public DNS name. network-interface.addresses.association.public-ip - The ID of the association of an Elastic IP address (IPv4) with a network interface. network-interface.addresses.primary - Specifies whether the IPv4 address of the network interface is the primary private IPv4 address. network-interface.addresses.private-dns-name - The private DNS name. network-interface.addresses.private-ip-address - The private IPv4 address associated with the network interface. network-interface.association.allocation-id - The allocation ID returned when you allocated the Elastic IP address (IPv4) for your network interface. network-interface.association.association-id - The association ID returned when the network interface was associated with an IPv4 address. network-interface.association.carrier-ip - The customer-owned IP address. network-interface.association.customer-owned-ip - The customer-owned IP address. 
network-interface.association.ip-owner-id - The owner of the Elastic IP address (IPv4) associated with the network interface. network-interface.association.public-dns-name - The public DNS name. network-interface.association.public-ip - The address of the Elastic IP address (IPv4) bound to the network interface. network-interface.attachment.attach-time - The time that the network interface was attached to an instance. network-interface.attachment.attachment-id - The ID of the interface attachment. network-interface.attachment.delete-on-termination - Specifies whether the attachment is deleted when an instance is terminated. network-interface.attachment.device-index - The device index to which the network interface is attached. network-interface.attachment.instance-id - The ID of the instance to which the network interface is attached. network-interface.attachment.instance-owner-id - The owner ID of the instance to which the network interface is attached. network-interface.attachment.network-card-index - The index of the network card. network-interface.attachment.status - The status of the attachment (attaching | attached | detaching | detached). network-interface.availability-zone - The Availability Zone for the network interface. network-interface.deny-all-igw-traffic - A Boolean that indicates whether a network interface with an IPv6 address is unreachable from the public internet. network-interface.description - The description of the network interface. network-interface.group-id - The ID of a security group associated with the network interface. network-interface.group-name - The name of a security group associated with the network interface. network-interface.ipv4-prefixes.ipv4-prefix - The IPv4 prefixes that are assigned to the network interface. network-interface.ipv6-address - The IPv6 address associated with the network interface. network-interface.ipv6-addresses.ipv6-address - The IPv6 address associated with the network interface. 
network-interface.ipv6-addresses.is-primary-ipv6 - A Boolean that indicates whether this is the primary IPv6 address. network-interface.ipv6-native - A Boolean that indicates whether this is an IPv6 only network interface. network-interface.ipv6-prefixes.ipv6-prefix - The IPv6 prefix assigned to the network interface. network-interface.mac-address - The MAC address of the network interface. network-interface.network-interface-id - The ID of the network interface. network-interface.operator.managed - A Boolean that indicates whether the instance has a managed network interface. network-interface.operator.principal - The principal that manages the network interface. Only valid for instances with managed network interfaces, where managed is true. network-interface.outpost-arn - The ARN of the Outpost. network-interface.owner-id - The ID of the owner of the network interface. network-interface.private-dns-name - The private DNS name of the network interface. network-interface.private-ip-address - The private IPv4 address. network-interface.public-dns-name - The public DNS name. network-interface.requester-id - The requester ID for the network interface. network-interface.requester-managed - Indicates whether the network interface is being managed by Amazon Web Services. network-interface.status - The status of the network interface (available | in-use). network-interface.source-dest-check - Whether the network interface performs source/destination checking. A value of true means that checking is enabled, and false means that checking is disabled. The value must be false for the network interface to perform network address translation (NAT) in your VPC. network-interface.subnet-id - The ID of the subnet for the network interface. network-interface.tag-key - The key of a tag assigned to the network interface. network-interface.tag-value - The value of a tag assigned to the network interface. network-interface.vpc-id - The ID of the VPC for the network interface. 
network-performance-options.bandwidth-weighting - Where the performance boost is applied, if applicable. Valid values: default, vpc-1, ebs-1. operator.managed - A Boolean that indicates whether this is a managed instance. operator.principal - The principal that manages the instance. Only valid for managed instances, where managed is true. outpost-arn - The Amazon Resource Name (ARN) of the Outpost. owner-id - The Amazon Web Services account ID of the instance owner. placement-group-name - The name of the placement group for the instance. placement-partition-number - The partition in which the instance is located. platform - The platform. To list only Windows instances, use windows. platform-details - The platform (Linux/UNIX | Red Hat BYOL Linux | Red Hat Enterprise Linux | Red Hat Enterprise Linux with HA | Red Hat Enterprise Linux with SQL Server Standard and HA | Red Hat Enterprise Linux with SQL Server Enterprise and HA | Red Hat Enterprise Linux with SQL Server Standard | Red Hat Enterprise Linux with SQL Server Web | Red Hat Enterprise Linux with SQL Server Enterprise | SQL Server Enterprise | SQL Server Standard | SQL Server Web | SUSE Linux | Ubuntu Pro | Windows | Windows BYOL | Windows with SQL Server Enterprise | Windows with SQL Server Standard | Windows with SQL Server Web). private-dns-name - The private IPv4 DNS name of the instance. private-dns-name-options.enable-resource-name-dns-a-record - A Boolean that indicates whether to respond to DNS queries for instance hostnames with DNS A records. private-dns-name-options.enable-resource-name-dns-aaaa-record - A Boolean that indicates whether to respond to DNS queries for instance hostnames with DNS AAAA records. private-dns-name-options.hostname-type - The type of hostname (ip-name | resource-name). private-ip-address - The private IPv4 address of the instance. This can only be used to filter by the primary IP address of the network interface attached to the instance. 
To filter by additional IP addresses assigned to the network interface, use the filter network-interface.addresses.private-ip-address. product-code - The product code associated with the AMI used to launch the instance. product-code.type - The type of product code (devpay | marketplace). ramdisk-id - The RAM disk ID. reason - The reason for the current state of the instance (for example, shows "User Initiated [date]" when you stop or terminate the instance). Similar to the state-reason-code filter. requester-id - The ID of the entity that launched the instance on your behalf (for example, Amazon Web Services Management Console, Auto Scaling, and so on). reservation-id - The ID of the instance's reservation. A reservation ID is created any time you launch an instance. A reservation ID has a one-to-one relationship with an instance launch request, but can be associated with more than one instance if you launch multiple instances using the same launch request. For example, if you launch one instance, you get one reservation ID. If you launch ten instances using the same launch request, you also get one reservation ID. root-device-name - The device name of the root device volume (for example, /dev/sda1). root-device-type - The type of the root device volume (ebs | instance-store). source-dest-check - Indicates whether the instance performs source/destination checking. A value of true means that checking is enabled, and false means that checking is disabled. The value must be false for the instance to perform network address translation (NAT) in your VPC. spot-instance-request-id - The ID of the Spot Instance request. state-reason-code - The reason code for the state change. state-reason-message - A message that describes the state change. subnet-id - The ID of the subnet for the instance. tag: - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. 
For example, to find all resources that have a tag with the key Owner and the value TeamA, specify tag:Owner for the filter name and TeamA for the filter value. tag-key - The key of a tag assigned to the resource. Use this filter to find all resources that have a tag with a specific key, regardless of the tag value. tenancy - The tenancy of an instance (dedicated | default | host). tpm-support - Indicates if the instance is configured for NitroTPM support (v2.0). usage-operation - The usage operation value for the instance (RunInstances | RunInstances:00g0 | RunInstances:0010 | RunInstances:1010 | RunInstances:1014 | RunInstances:1110 | RunInstances:0014 | RunInstances:0210 | RunInstances:0110 | RunInstances:0100 | RunInstances:0004 | RunInstances:0200 | RunInstances:000g | RunInstances:0g00 | RunInstances:0002 | RunInstances:0800 | RunInstances:0102 | RunInstances:0006 | RunInstances:0202). usage-operation-update-time - The time that the usage operation was last updated, for example, 2022-09-15T17:15:20.000Z. virtualization-type - The virtualization type of the instance (paravirtual | hvm). vpc-id - The ID of the VPC that the instance is running in. /// - instanceIds: The instance IDs. Default: Describes all your instances. /// - maxResults: The maximum number of items to return for this request. To get the next page of items, make another request with the token returned in the output. /// - nextToken: The token returned from a previous paginated request. Pagination continues from the end of the items returned by the previous request. 
diff --git a/Sources/Soto/Services/EC2/EC2_shapes.swift b/Sources/Soto/Services/EC2/EC2_shapes.swift index 6207012c6a..851180f4c3 100644 --- a/Sources/Soto/Services/EC2/EC2_shapes.swift +++ b/Sources/Soto/Services/EC2/EC2_shapes.swift @@ -255,6 +255,13 @@ extension EC2 { public var description: String { return self.rawValue } } + public enum BandwidthWeightingType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case `default` = "default" + case ebs1 = "ebs-1" + case vpc1 = "vpc-1" + public var description: String { return self.rawValue } + } + public enum BareMetal: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case excluded = "excluded" case included = "included" @@ -1044,6 +1051,13 @@ extension EC2 { public var description: String { return self.rawValue } } + public enum InstanceBandwidthWeighting: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case `default` = "default" + case ebs1 = "ebs-1" + case vpc1 = "vpc-1" + public var description: String { return self.rawValue } + } + public enum InstanceBootModeValues: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case legacyBios = "legacy-bios" case uefi = "uefi" @@ -3023,6 +3037,12 @@ extension EC2 { public var description: String { return self.rawValue } } + public enum SnapshotLocationEnum: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case local = "local" + case regional = "regional" + public var description: String { return self.rawValue } + } + public enum SnapshotState: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case completed = "completed" case error = "error" @@ -12712,7 +12732,9 @@ extension EC2 { public let description: String? /// Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. 
If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. public let dryRun: Bool? - /// The Amazon Resource Name (ARN) of the Outpost on which to create a local snapshot. To create a snapshot of a volume in a Region, omit this parameter. The snapshot is created in the same Region as the volume. To create a snapshot of a volume on an Outpost and store the snapshot in the Region, omit this parameter. The snapshot is created in the Region for the Outpost. To create a snapshot of a volume on an Outpost and store the snapshot on an Outpost, specify the ARN of the destination Outpost. The snapshot must be created on the same Outpost as the volume. For more information, see Create local snapshots from volumes on an Outpost in the Amazon EBS User Guide. + /// Only supported for volumes in Local Zones. If the source volume is not in a Local Zone, omit this parameter. To create a local snapshot in the same Local Zone as the source volume, specify local. To create a regional snapshot in the parent Region of the Local Zone, specify regional or omit this parameter. Default value: regional + public let location: SnapshotLocationEnum? + /// Only supported for volumes on Outposts. If the source volume is not on an Outpost, omit this parameter. To create the snapshot on the same Outpost as the source volume, specify the ARN of that Outpost. The snapshot must be created on the same Outpost as the volume. To create the snapshot in the parent Region of the Outpost, omit this parameter. For more information, see Create local snapshots from volumes on an Outpost in the Amazon EBS User Guide. public let outpostArn: String? /// The tags to apply to the snapshot during creation. @OptionalCustomCoding> @@ -12721,9 +12743,10 @@ extension EC2 { public let volumeId: String? @inlinable - public init(description: String? = nil, dryRun: Bool? = nil, outpostArn: String? = nil, tagSpecifications: [TagSpecification]? 
= nil, volumeId: String? = nil) { + public init(description: String? = nil, dryRun: Bool? = nil, location: SnapshotLocationEnum? = nil, outpostArn: String? = nil, tagSpecifications: [TagSpecification]? = nil, volumeId: String? = nil) { self.description = description self.dryRun = dryRun + self.location = location self.outpostArn = outpostArn self.tagSpecifications = tagSpecifications self.volumeId = volumeId @@ -12732,6 +12755,7 @@ extension EC2 { private enum CodingKeys: String, CodingKey { case description = "Description" case dryRun = "dryRun" + case location = "Location" case outpostArn = "OutpostArn" case tagSpecifications = "TagSpecification" case volumeId = "VolumeId" @@ -12749,18 +12773,21 @@ extension EC2 { public let dryRun: Bool? /// The instance to specify which volumes should be included in the snapshots. public let instanceSpecification: InstanceSpecification? - /// The Amazon Resource Name (ARN) of the Outpost on which to create the local snapshots. To create snapshots from an instance in a Region, omit this parameter. The snapshots are created in the same Region as the instance. To create snapshots from an instance on an Outpost and store the snapshots in the Region, omit this parameter. The snapshots are created in the Region for the Outpost. To create snapshots from an instance on an Outpost and store the snapshots on an Outpost, specify the ARN of the destination Outpost. The snapshots must be created on the same Outpost as the instance. For more information, see Create multi-volume local snapshots from instances on an Outpost in the Amazon EBS User Guide. + /// Only supported for instances in Local Zones. If the source instance is not in a Local Zone, omit this parameter. To create local snapshots in the same Local Zone as the source instance, specify local. To create regional snapshots in the parent Region of the Local Zone, specify regional or omit this parameter. Default value: regional + public let location: SnapshotLocationEnum?
+ /// Only supported for instances on Outposts. If the source instance is not on an Outpost, omit this parameter. To create the snapshots on the same Outpost as the source instance, specify the ARN of that Outpost. The snapshots must be created on the same Outpost as the instance. To create the snapshots in the parent Region of the Outpost, omit this parameter. For more information, see Create multi-volume local snapshots from instances on an Outpost in the Amazon EBS User Guide. public let outpostArn: String? /// Tags to apply to every snapshot specified by the instance. @OptionalCustomCoding> public var tagSpecifications: [TagSpecification]? @inlinable - public init(copyTagsFromSource: CopyTagsFromSource? = nil, description: String? = nil, dryRun: Bool? = nil, instanceSpecification: InstanceSpecification? = nil, outpostArn: String? = nil, tagSpecifications: [TagSpecification]? = nil) { + public init(copyTagsFromSource: CopyTagsFromSource? = nil, description: String? = nil, dryRun: Bool? = nil, instanceSpecification: InstanceSpecification? = nil, location: SnapshotLocationEnum? = nil, outpostArn: String? = nil, tagSpecifications: [TagSpecification]? = nil) { self.copyTagsFromSource = copyTagsFromSource self.description = description self.dryRun = dryRun self.instanceSpecification = instanceSpecification + self.location = location self.outpostArn = outpostArn self.tagSpecifications = tagSpecifications } @@ -12770,6 +12797,7 @@ extension EC2 { case description = "Description" case dryRun = "DryRun" case instanceSpecification = "InstanceSpecification" + case location = "Location" case outpostArn = "OutpostArn" case tagSpecifications = "TagSpecification" } @@ -16758,6 +16786,24 @@ extension EC2 { } } + public struct DeleteSecurityGroupResult: AWSDecodableShape { + /// The ID of the deleted security group. + public let groupId: String? + /// Returns true if the request succeeds; otherwise, returns an error. + public let `return`: Bool? + + @inlinable + public init(groupId: String?
= nil, return: Bool? = nil) { + self.groupId = groupId + self.`return` = `return` + } + + private enum CodingKeys: String, CodingKey { + case groupId = "groupId" + case `return` = "return" + } + } + public struct DeleteSnapshotRequest: AWSEncodableShape { /// Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. public let dryRun: Bool? @@ -21487,7 +21533,7 @@ extension EC2 { /// Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. public let dryRun: Bool? - /// One or more filters. Filter names and values are case-sensitive. auto-recovery-supported - Indicates whether Amazon CloudWatch action based recovery is supported (true | false). bare-metal - Indicates whether it is a bare metal instance type (true | false). burstable-performance-supported - Indicates whether the instance type is a burstable performance T instance type (true | false). current-generation - Indicates whether this instance type is the latest generation instance type of an instance family (true | false). ebs-info.ebs-optimized-info.baseline-bandwidth-in-mbps - The baseline bandwidth performance for an EBS-optimized instance type, in Mbps. ebs-info.ebs-optimized-info.baseline-iops - The baseline input/output storage operations per second for an EBS-optimized instance type. ebs-info.ebs-optimized-info.baseline-throughput-in-mbps - The baseline throughput performance for an EBS-optimized instance type, in MB/s. ebs-info.ebs-optimized-info.maximum-bandwidth-in-mbps - The maximum bandwidth performance for an EBS-optimized instance type, in Mbps. 
ebs-info.ebs-optimized-info.maximum-iops - The maximum input/output storage operations per second for an EBS-optimized instance type. ebs-info.ebs-optimized-info.maximum-throughput-in-mbps - The maximum throughput performance for an EBS-optimized instance type, in MB/s. ebs-info.ebs-optimized-support - Indicates whether the instance type is EBS-optimized (supported | unsupported | default). ebs-info.encryption-support - Indicates whether EBS encryption is supported (supported | unsupported). ebs-info.nvme-support - Indicates whether non-volatile memory express (NVMe) is supported for EBS volumes (required | supported | unsupported). free-tier-eligible - Indicates whether the instance type is eligible to use in the free tier (true | false). hibernation-supported - Indicates whether On-Demand hibernation is supported (true | false). hypervisor - The hypervisor (nitro | xen). instance-storage-info.disk.count - The number of local disks. instance-storage-info.disk.size-in-gb - The storage size of each instance storage disk, in GB. instance-storage-info.disk.type - The storage technology for the local instance storage disks (hdd | ssd). instance-storage-info.encryption-support - Indicates whether data is encrypted at rest (required | supported | unsupported). instance-storage-info.nvme-support - Indicates whether non-volatile memory express (NVMe) is supported for instance store (required | supported | unsupported). instance-storage-info.total-size-in-gb - The total amount of storage available from all local instance storage, in GB. instance-storage-supported - Indicates whether the instance type has local instance storage (true | false). instance-type - The instance type (for example c5.2xlarge or c5*). memory-info.size-in-mib - The memory size. network-info.efa-info.maximum-efa-interfaces - The maximum number of Elastic Fabric Adapters (EFAs) per instance. 
network-info.efa-supported - Indicates whether the instance type supports Elastic Fabric Adapter (EFA) (true | false). network-info.ena-support - Indicates whether Elastic Network Adapter (ENA) is supported or required (required | supported | unsupported). network-info.encryption-in-transit-supported - Indicates whether the instance type automatically encrypts in-transit traffic between instances (true | false). network-info.ipv4-addresses-per-interface - The maximum number of private IPv4 addresses per network interface. network-info.ipv6-addresses-per-interface - The maximum number of private IPv6 addresses per network interface. network-info.ipv6-supported - Indicates whether the instance type supports IPv6 (true | false). network-info.maximum-network-cards - The maximum number of network cards per instance. network-info.maximum-network-interfaces - The maximum number of network interfaces per instance. network-info.network-performance - The network performance (for example, "25 Gigabit"). nitro-enclaves-support - Indicates whether Nitro Enclaves is supported (supported | unsupported). nitro-tpm-support - Indicates whether NitroTPM is supported (supported | unsupported). nitro-tpm-info.supported-versions - The supported NitroTPM version (2.0). processor-info.supported-architecture - The CPU architecture (arm64 | i386 | x86_64). processor-info.sustained-clock-speed-in-ghz - The CPU clock speed, in GHz. processor-info.supported-features - The supported CPU features (amd-sev-snp). supported-boot-mode - The boot mode (legacy-bios | uefi). supported-root-device-type - The root device type (ebs | instance-store). supported-usage-class - The usage class (on-demand | spot | capacity-block). supported-virtualization-type - The virtualization type (hvm | paravirtual). vcpu-info.default-cores - The default number of cores for the instance type. vcpu-info.default-threads-per-core - The default number of threads per core for the instance type. 
vcpu-info.default-vcpus - The default number of vCPUs for the instance type. vcpu-info.valid-cores - The number of cores that can be configured for the instance type. vcpu-info.valid-threads-per-core - The number of threads per core that can be configured for the instance type. For example, "1" or "1,2". + /// One or more filters. Filter names and values are case-sensitive. auto-recovery-supported - Indicates whether Amazon CloudWatch action based recovery is supported (true | false). bare-metal - Indicates whether it is a bare metal instance type (true | false). burstable-performance-supported - Indicates whether the instance type is a burstable performance T instance type (true | false). current-generation - Indicates whether this instance type is the latest generation instance type of an instance family (true | false). ebs-info.ebs-optimized-info.baseline-bandwidth-in-mbps - The baseline bandwidth performance for an EBS-optimized instance type, in Mbps. ebs-info.ebs-optimized-info.baseline-iops - The baseline input/output storage operations per second for an EBS-optimized instance type. ebs-info.ebs-optimized-info.baseline-throughput-in-mbps - The baseline throughput performance for an EBS-optimized instance type, in MB/s. ebs-info.ebs-optimized-info.maximum-bandwidth-in-mbps - The maximum bandwidth performance for an EBS-optimized instance type, in Mbps. ebs-info.ebs-optimized-info.maximum-iops - The maximum input/output storage operations per second for an EBS-optimized instance type. ebs-info.ebs-optimized-info.maximum-throughput-in-mbps - The maximum throughput performance for an EBS-optimized instance type, in MB/s. ebs-info.ebs-optimized-support - Indicates whether the instance type is EBS-optimized (supported | unsupported | default). ebs-info.encryption-support - Indicates whether EBS encryption is supported (supported | unsupported). 
ebs-info.nvme-support - Indicates whether non-volatile memory express (NVMe) is supported for EBS volumes (required | supported | unsupported). free-tier-eligible - Indicates whether the instance type is eligible to use in the free tier (true | false). hibernation-supported - Indicates whether On-Demand hibernation is supported (true | false). hypervisor - The hypervisor (nitro | xen). instance-storage-info.disk.count - The number of local disks. instance-storage-info.disk.size-in-gb - The storage size of each instance storage disk, in GB. instance-storage-info.disk.type - The storage technology for the local instance storage disks (hdd | ssd). instance-storage-info.encryption-support - Indicates whether data is encrypted at rest (required | supported | unsupported). instance-storage-info.nvme-support - Indicates whether non-volatile memory express (NVMe) is supported for instance store (required | supported | unsupported). instance-storage-info.total-size-in-gb - The total amount of storage available from all local instance storage, in GB. instance-storage-supported - Indicates whether the instance type has local instance storage (true | false). instance-type - The instance type (for example c5.2xlarge or c5*). memory-info.size-in-mib - The memory size. network-info.bandwidth-weightings - For instances that support bandwidth weighting to boost performance (default, vpc-1, ebs-1). network-info.efa-info.maximum-efa-interfaces - The maximum number of Elastic Fabric Adapters (EFAs) per instance. network-info.efa-supported - Indicates whether the instance type supports Elastic Fabric Adapter (EFA) (true | false). network-info.ena-support - Indicates whether Elastic Network Adapter (ENA) is supported or required (required | supported | unsupported). network-info.encryption-in-transit-supported - Indicates whether the instance type automatically encrypts in-transit traffic between instances (true | false). 
network-info.ipv4-addresses-per-interface - The maximum number of private IPv4 addresses per network interface. network-info.ipv6-addresses-per-interface - The maximum number of private IPv6 addresses per network interface. network-info.ipv6-supported - Indicates whether the instance type supports IPv6 (true | false). network-info.maximum-network-cards - The maximum number of network cards per instance. network-info.maximum-network-interfaces - The maximum number of network interfaces per instance. network-info.network-performance - The network performance (for example, "25 Gigabit"). nitro-enclaves-support - Indicates whether Nitro Enclaves is supported (supported | unsupported). nitro-tpm-support - Indicates whether NitroTPM is supported (supported | unsupported). nitro-tpm-info.supported-versions - The supported NitroTPM version (2.0). processor-info.supported-architecture - The CPU architecture (arm64 | i386 | x86_64). processor-info.sustained-clock-speed-in-ghz - The CPU clock speed, in GHz. processor-info.supported-features - The supported CPU features (amd-sev-snp). supported-boot-mode - The boot mode (legacy-bios | uefi). supported-root-device-type - The root device type (ebs | instance-store). supported-usage-class - The usage class (on-demand | spot | capacity-block). supported-virtualization-type - The virtualization type (hvm | paravirtual). vcpu-info.default-cores - The default number of cores for the instance type. vcpu-info.default-threads-per-core - The default number of threads per core for the instance type. vcpu-info.default-vcpus - The default number of vCPUs for the instance type. vcpu-info.valid-cores - The number of cores that can be configured for the instance type. vcpu-info.valid-threads-per-core - The number of threads per core that can be configured for the instance type. For example, "1" or "1,2". @OptionalCustomCoding> public var filters: [Filter]? /// The instance types. 
@@ -21550,7 +21596,7 @@ extension EC2 { /// Checks whether you have the required permissions for the operation, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. public let dryRun: Bool? - /// The filters. affinity - The affinity setting for an instance running on a Dedicated Host (default | host). architecture - The instance architecture (i386 | x86_64 | arm64). availability-zone - The Availability Zone of the instance. block-device-mapping.attach-time - The attach time for an EBS volume mapped to the instance, for example, 2022-09-15T17:15:20.000Z. block-device-mapping.delete-on-termination - A Boolean that indicates whether the EBS volume is deleted on instance termination. block-device-mapping.device-name - The device name specified in the block device mapping (for example, /dev/sdh or xvdh). block-device-mapping.status - The status for the EBS volume (attaching | attached | detaching | detached). block-device-mapping.volume-id - The volume ID of the EBS volume. boot-mode - The boot mode that was specified by the AMI (legacy-bios | uefi | uefi-preferred). capacity-reservation-id - The ID of the Capacity Reservation into which the instance was launched. capacity-reservation-specification.capacity-reservation-preference - The instance's Capacity Reservation preference (open | none). capacity-reservation-specification.capacity-reservation-target.capacity-reservation-id - The ID of the targeted Capacity Reservation. capacity-reservation-specification.capacity-reservation-target.capacity-reservation-resource-group-arn - The ARN of the targeted Capacity Reservation group. client-token - The idempotency token you provided when you launched the instance. current-instance-boot-mode - The boot mode that is used to launch the instance at launch or start (legacy-bios | uefi). dns-name - The public DNS name of the instance. 
ebs-optimized - A Boolean that indicates whether the instance is optimized for Amazon EBS I/O. ena-support - A Boolean that indicates whether the instance is enabled for enhanced networking with ENA. enclave-options.enabled - A Boolean that indicates whether the instance is enabled for Amazon Web Services Nitro Enclaves. hibernation-options.configured - A Boolean that indicates whether the instance is enabled for hibernation. A value of true means that the instance is enabled for hibernation. host-id - The ID of the Dedicated Host on which the instance is running, if applicable. hypervisor - The hypervisor type of the instance (ovm | xen). The value xen is used for both Xen and Nitro hypervisors. iam-instance-profile.arn - The instance profile associated with the instance. Specified as an ARN. iam-instance-profile.id - The instance profile associated with the instance. Specified as an ID. image-id - The ID of the image used to launch the instance. instance-id - The ID of the instance. instance-lifecycle - Indicates whether this is a Spot Instance, a Scheduled Instance, or a Capacity Block (spot | scheduled | capacity-block). instance-state-code - The state of the instance, as a 16-bit unsigned integer. The high byte is used for internal purposes and should be ignored. The low byte is set based on the state represented. The valid values are: 0 (pending), 16 (running), 32 (shutting-down), 48 (terminated), 64 (stopping), and 80 (stopped). instance-state-name - The state of the instance (pending | running | shutting-down | terminated | stopping | stopped). instance-type - The type of instance (for example, t2.micro). instance.group-id - The ID of the security group for the instance. instance.group-name - The name of the security group for the instance. ip-address - The public IPv4 address of the instance. ipv6-address - The IPv6 address of the instance. kernel-id - The kernel ID. key-name - The name of the key pair used when the instance was launched. 
launch-index - When launching multiple instances, this is the index for the instance in the launch group (for example, 0, 1, 2, and so on). launch-time - The time when the instance was launched, in the ISO 8601 format in the UTC time zone (YYYY-MM-DDThh:mm:ss.sssZ), for example, 2021-09-29T11:04:43.305Z. You can use a wildcard (*), for example, 2021-09-29T*, which matches an entire day. maintenance-options.auto-recovery - The current automatic recovery behavior of the instance (disabled | default). metadata-options.http-endpoint - The status of access to the HTTP metadata endpoint on your instance (enabled | disabled) metadata-options.http-protocol-ipv4 - Indicates whether the IPv4 endpoint is enabled (disabled | enabled). metadata-options.http-protocol-ipv6 - Indicates whether the IPv6 endpoint is enabled (disabled | enabled). metadata-options.http-put-response-hop-limit - The HTTP metadata request put response hop limit (integer, possible values 1 to 64) metadata-options.http-tokens - The metadata request authorization state (optional | required) metadata-options.instance-metadata-tags - The status of access to instance tags from the instance metadata (enabled | disabled) metadata-options.state - The state of the metadata option changes (pending | applied). monitoring-state - Indicates whether detailed monitoring is enabled (disabled | enabled). network-interface.addresses.association.allocation-id - The allocation ID. network-interface.addresses.association.association-id - The association ID. network-interface.addresses.association.carrier-ip - The carrier IP address. network-interface.addresses.association.customer-owned-ip - The customer-owned IP address. network-interface.addresses.association.ip-owner-id - The owner ID of the private IPv4 address associated with the network interface. network-interface.addresses.association.public-dns-name - The public DNS name. 
network-interface.addresses.association.public-ip - The ID of the association of an Elastic IP address (IPv4) with a network interface. network-interface.addresses.primary - Specifies whether the IPv4 address of the network interface is the primary private IPv4 address. network-interface.addresses.private-dns-name - The private DNS name. network-interface.addresses.private-ip-address - The private IPv4 address associated with the network interface. network-interface.association.allocation-id - The allocation ID returned when you allocated the Elastic IP address (IPv4) for your network interface. network-interface.association.association-id - The association ID returned when the network interface was associated with an IPv4 address. network-interface.association.carrier-ip - The carrier IP address. network-interface.association.customer-owned-ip - The customer-owned IP address. network-interface.association.ip-owner-id - The owner of the Elastic IP address (IPv4) associated with the network interface. network-interface.association.public-dns-name - The public DNS name. network-interface.association.public-ip - The address of the Elastic IP address (IPv4) bound to the network interface. network-interface.attachment.attach-time - The time that the network interface was attached to an instance. network-interface.attachment.attachment-id - The ID of the interface attachment. network-interface.attachment.delete-on-termination - Specifies whether the attachment is deleted when an instance is terminated. network-interface.attachment.device-index - The device index to which the network interface is attached. network-interface.attachment.instance-id - The ID of the instance to which the network interface is attached. network-interface.attachment.instance-owner-id - The owner ID of the instance to which the network interface is attached. network-interface.attachment.network-card-index - The index of the network card. 
network-interface.attachment.status - The status of the attachment (attaching | attached | detaching | detached). network-interface.availability-zone - The Availability Zone for the network interface. network-interface.deny-all-igw-traffic - A Boolean that indicates whether a network interface with an IPv6 address is unreachable from the public internet. network-interface.description - The description of the network interface. network-interface.group-id - The ID of a security group associated with the network interface. network-interface.group-name - The name of a security group associated with the network interface. network-interface.ipv4-prefixes.ipv4-prefix - The IPv4 prefixes that are assigned to the network interface. network-interface.ipv6-address - The IPv6 address associated with the network interface. network-interface.ipv6-addresses.ipv6-address - The IPv6 address associated with the network interface. network-interface.ipv6-addresses.is-primary-ipv6 - A Boolean that indicates whether this is the primary IPv6 address. network-interface.ipv6-native - A Boolean that indicates whether this is an IPv6 only network interface. network-interface.ipv6-prefixes.ipv6-prefix - The IPv6 prefix assigned to the network interface. network-interface.mac-address - The MAC address of the network interface. network-interface.network-interface-id - The ID of the network interface. network-interface.operator.managed - A Boolean that indicates whether the instance has a managed network interface. network-interface.operator.principal - The principal that manages the network interface. Only valid for instances with managed network interfaces, where managed is true. network-interface.outpost-arn - The ARN of the Outpost. network-interface.owner-id - The ID of the owner of the network interface. network-interface.private-dns-name - The private DNS name of the network interface. network-interface.private-ip-address - The private IPv4 address. 
network-interface.public-dns-name - The public DNS name. network-interface.requester-id - The requester ID for the network interface. network-interface.requester-managed - Indicates whether the network interface is being managed by Amazon Web Services. network-interface.status - The status of the network interface (available | in-use). network-interface.source-dest-check - Whether the network interface performs source/destination checking. A value of true means that checking is enabled, and false means that checking is disabled. The value must be false for the network interface to perform network address translation (NAT) in your VPC. network-interface.subnet-id - The ID of the subnet for the network interface. network-interface.tag-key - The key of a tag assigned to the network interface. network-interface.tag-value - The value of a tag assigned to the network interface. network-interface.vpc-id - The ID of the VPC for the network interface. operator.managed - A Boolean that indicates whether this is a managed instance. operator.principal - The principal that manages the instance. Only valid for managed instances, where managed is true. outpost-arn - The Amazon Resource Name (ARN) of the Outpost. owner-id - The Amazon Web Services account ID of the instance owner. placement-group-name - The name of the placement group for the instance. placement-partition-number - The partition in which the instance is located. platform - The platform. To list only Windows instances, use windows. 
platform-details - The platform (Linux/UNIX | Red Hat BYOL Linux | Red Hat Enterprise Linux | Red Hat Enterprise Linux with HA | Red Hat Enterprise Linux with SQL Server Standard and HA | Red Hat Enterprise Linux with SQL Server Enterprise and HA | Red Hat Enterprise Linux with SQL Server Standard | Red Hat Enterprise Linux with SQL Server Web | Red Hat Enterprise Linux with SQL Server Enterprise | SQL Server Enterprise | SQL Server Standard | SQL Server Web | SUSE Linux | Ubuntu Pro | Windows | Windows BYOL | Windows with SQL Server Enterprise | Windows with SQL Server Standard | Windows with SQL Server Web). private-dns-name - The private IPv4 DNS name of the instance. private-dns-name-options.enable-resource-name-dns-a-record - A Boolean that indicates whether to respond to DNS queries for instance hostnames with DNS A records. private-dns-name-options.enable-resource-name-dns-aaaa-record - A Boolean that indicates whether to respond to DNS queries for instance hostnames with DNS AAAA records. private-dns-name-options.hostname-type - The type of hostname (ip-name | resource-name). private-ip-address - The private IPv4 address of the instance. This can only be used to filter by the primary IP address of the network interface attached to the instance. To filter by additional IP addresses assigned to the network interface, use the filter network-interface.addresses.private-ip-address. product-code - The product code associated with the AMI used to launch the instance. product-code.type - The type of product code (devpay | marketplace). ramdisk-id - The RAM disk ID. reason - The reason for the current state of the instance (for example, shows "User Initiated [date]" when you stop or terminate the instance). Similar to the state-reason-code filter. requester-id - The ID of the entity that launched the instance on your behalf (for example, Amazon Web Services Management Console, Auto Scaling, and so on). reservation-id - The ID of the instance's reservation. 
A reservation ID is created any time you launch an instance. A reservation ID has a one-to-one relationship with an instance launch request, but can be associated with more than one instance if you launch multiple instances using the same launch request. For example, if you launch one instance, you get one reservation ID. If you launch ten instances using the same launch request, you also get one reservation ID. root-device-name - The device name of the root device volume (for example, /dev/sda1). root-device-type - The type of the root device volume (ebs | instance-store). source-dest-check - Indicates whether the instance performs source/destination checking. A value of true means that checking is enabled, and false means that checking is disabled. The value must be false for the instance to perform network address translation (NAT) in your VPC. spot-instance-request-id - The ID of the Spot Instance request. state-reason-code - The reason code for the state change. state-reason-message - A message that describes the state change. subnet-id - The ID of the subnet for the instance. tag: - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key Owner and the value TeamA, specify tag:Owner for the filter name and TeamA for the filter value. tag-key - The key of a tag assigned to the resource. Use this filter to find all resources that have a tag with a specific key, regardless of the tag value. tenancy - The tenancy of an instance (dedicated | default | host). tpm-support - Indicates if the instance is configured for NitroTPM support (v2.0). 
usage-operation - The usage operation value for the instance (RunInstances | RunInstances:00g0 | RunInstances:0010 | RunInstances:1010 | RunInstances:1014 | RunInstances:1110 | RunInstances:0014 | RunInstances:0210 | RunInstances:0110 | RunInstances:0100 | RunInstances:0004 | RunInstances:0200 | RunInstances:000g | RunInstances:0g00 | RunInstances:0002 | RunInstances:0800 | RunInstances:0102 | RunInstances:0006 | RunInstances:0202). usage-operation-update-time - The time that the usage operation was last updated, for example, 2022-09-15T17:15:20.000Z. virtualization-type - The virtualization type of the instance (paravirtual | hvm). vpc-id - The ID of the VPC that the instance is running in. + /// The filters. affinity - The affinity setting for an instance running on a Dedicated Host (default | host). architecture - The instance architecture (i386 | x86_64 | arm64). availability-zone - The Availability Zone of the instance. block-device-mapping.attach-time - The attach time for an EBS volume mapped to the instance, for example, 2022-09-15T17:15:20.000Z. block-device-mapping.delete-on-termination - A Boolean that indicates whether the EBS volume is deleted on instance termination. block-device-mapping.device-name - The device name specified in the block device mapping (for example, /dev/sdh or xvdh). block-device-mapping.status - The status for the EBS volume (attaching | attached | detaching | detached). block-device-mapping.volume-id - The volume ID of the EBS volume. boot-mode - The boot mode that was specified by the AMI (legacy-bios | uefi | uefi-preferred). capacity-reservation-id - The ID of the Capacity Reservation into which the instance was launched. capacity-reservation-specification.capacity-reservation-preference - The instance's Capacity Reservation preference (open | none). capacity-reservation-specification.capacity-reservation-target.capacity-reservation-id - The ID of the targeted Capacity Reservation. 
capacity-reservation-specification.capacity-reservation-target.capacity-reservation-resource-group-arn - The ARN of the targeted Capacity Reservation group. client-token - The idempotency token you provided when you launched the instance. current-instance-boot-mode - The boot mode that is used to launch the instance at launch or start (legacy-bios | uefi). dns-name - The public DNS name of the instance. ebs-optimized - A Boolean that indicates whether the instance is optimized for Amazon EBS I/O. ena-support - A Boolean that indicates whether the instance is enabled for enhanced networking with ENA. enclave-options.enabled - A Boolean that indicates whether the instance is enabled for Amazon Web Services Nitro Enclaves. hibernation-options.configured - A Boolean that indicates whether the instance is enabled for hibernation. A value of true means that the instance is enabled for hibernation. host-id - The ID of the Dedicated Host on which the instance is running, if applicable. hypervisor - The hypervisor type of the instance (ovm | xen). The value xen is used for both Xen and Nitro hypervisors. iam-instance-profile.arn - The instance profile associated with the instance. Specified as an ARN. iam-instance-profile.id - The instance profile associated with the instance. Specified as an ID. image-id - The ID of the image used to launch the instance. instance-id - The ID of the instance. instance-lifecycle - Indicates whether this is a Spot Instance, a Scheduled Instance, or a Capacity Block (spot | scheduled | capacity-block). instance-state-code - The state of the instance, as a 16-bit unsigned integer. The high byte is used for internal purposes and should be ignored. The low byte is set based on the state represented. The valid values are: 0 (pending), 16 (running), 32 (shutting-down), 48 (terminated), 64 (stopping), and 80 (stopped). instance-state-name - The state of the instance (pending | running | shutting-down | terminated | stopping | stopped). 
instance-type - The type of instance (for example, t2.micro). instance.group-id - The ID of the security group for the instance. instance.group-name - The name of the security group for the instance. ip-address - The public IPv4 address of the instance. ipv6-address - The IPv6 address of the instance. kernel-id - The kernel ID. key-name - The name of the key pair used when the instance was launched. launch-index - When launching multiple instances, this is the index for the instance in the launch group (for example, 0, 1, 2, and so on). launch-time - The time when the instance was launched, in the ISO 8601 format in the UTC time zone (YYYY-MM-DDThh:mm:ss.sssZ), for example, 2021-09-29T11:04:43.305Z. You can use a wildcard (*), for example, 2021-09-29T*, which matches an entire day. maintenance-options.auto-recovery - The current automatic recovery behavior of the instance (disabled | default). metadata-options.http-endpoint - The status of access to the HTTP metadata endpoint on your instance (enabled | disabled) metadata-options.http-protocol-ipv4 - Indicates whether the IPv4 endpoint is enabled (disabled | enabled). metadata-options.http-protocol-ipv6 - Indicates whether the IPv6 endpoint is enabled (disabled | enabled). metadata-options.http-put-response-hop-limit - The HTTP metadata request put response hop limit (integer, possible values 1 to 64) metadata-options.http-tokens - The metadata request authorization state (optional | required) metadata-options.instance-metadata-tags - The status of access to instance tags from the instance metadata (enabled | disabled) metadata-options.state - The state of the metadata option changes (pending | applied). monitoring-state - Indicates whether detailed monitoring is enabled (disabled | enabled). network-interface.addresses.association.allocation-id - The allocation ID. network-interface.addresses.association.association-id - The association ID. 
network-interface.addresses.association.carrier-ip - The carrier IP address. network-interface.addresses.association.customer-owned-ip - The customer-owned IP address. network-interface.addresses.association.ip-owner-id - The owner ID of the private IPv4 address associated with the network interface. network-interface.addresses.association.public-dns-name - The public DNS name. network-interface.addresses.association.public-ip - The ID of the association of an Elastic IP address (IPv4) with a network interface. network-interface.addresses.primary - Specifies whether the IPv4 address of the network interface is the primary private IPv4 address. network-interface.addresses.private-dns-name - The private DNS name. network-interface.addresses.private-ip-address - The private IPv4 address associated with the network interface. network-interface.association.allocation-id - The allocation ID returned when you allocated the Elastic IP address (IPv4) for your network interface. network-interface.association.association-id - The association ID returned when the network interface was associated with an IPv4 address. network-interface.association.carrier-ip - The carrier IP address. network-interface.association.customer-owned-ip - The customer-owned IP address. network-interface.association.ip-owner-id - The owner of the Elastic IP address (IPv4) associated with the network interface. network-interface.association.public-dns-name - The public DNS name. network-interface.association.public-ip - The address of the Elastic IP address (IPv4) bound to the network interface. network-interface.attachment.attach-time - The time that the network interface was attached to an instance. network-interface.attachment.attachment-id - The ID of the interface attachment. network-interface.attachment.delete-on-termination - Specifies whether the attachment is deleted when an instance is terminated. 
network-interface.attachment.device-index - The device index to which the network interface is attached. network-interface.attachment.instance-id - The ID of the instance to which the network interface is attached. network-interface.attachment.instance-owner-id - The owner ID of the instance to which the network interface is attached. network-interface.attachment.network-card-index - The index of the network card. network-interface.attachment.status - The status of the attachment (attaching | attached | detaching | detached). network-interface.availability-zone - The Availability Zone for the network interface. network-interface.deny-all-igw-traffic - A Boolean that indicates whether a network interface with an IPv6 address is unreachable from the public internet. network-interface.description - The description of the network interface. network-interface.group-id - The ID of a security group associated with the network interface. network-interface.group-name - The name of a security group associated with the network interface. network-interface.ipv4-prefixes.ipv4-prefix - The IPv4 prefixes that are assigned to the network interface. network-interface.ipv6-address - The IPv6 address associated with the network interface. network-interface.ipv6-addresses.ipv6-address - The IPv6 address associated with the network interface. network-interface.ipv6-addresses.is-primary-ipv6 - A Boolean that indicates whether this is the primary IPv6 address. network-interface.ipv6-native - A Boolean that indicates whether this is an IPv6 only network interface. network-interface.ipv6-prefixes.ipv6-prefix - The IPv6 prefix assigned to the network interface. network-interface.mac-address - The MAC address of the network interface. network-interface.network-interface-id - The ID of the network interface. network-interface.operator.managed - A Boolean that indicates whether the instance has a managed network interface. 
network-interface.operator.principal - The principal that manages the network interface. Only valid for instances with managed network interfaces, where managed is true. network-interface.outpost-arn - The ARN of the Outpost. network-interface.owner-id - The ID of the owner of the network interface. network-interface.private-dns-name - The private DNS name of the network interface. network-interface.private-ip-address - The private IPv4 address. network-interface.public-dns-name - The public DNS name. network-interface.requester-id - The requester ID for the network interface. network-interface.requester-managed - Indicates whether the network interface is being managed by Amazon Web Services. network-interface.status - The status of the network interface (available | in-use). network-interface.source-dest-check - Whether the network interface performs source/destination checking. A value of true means that checking is enabled, and false means that checking is disabled. The value must be false for the network interface to perform network address translation (NAT) in your VPC. network-interface.subnet-id - The ID of the subnet for the network interface. network-interface.tag-key - The key of a tag assigned to the network interface. network-interface.tag-value - The value of a tag assigned to the network interface. network-interface.vpc-id - The ID of the VPC for the network interface. network-performance-options.bandwidth-weighting - Where the performance boost is applied, if applicable. Valid values: default, vpc-1, ebs-1. operator.managed - A Boolean that indicates whether this is a managed instance. operator.principal - The principal that manages the instance. Only valid for managed instances, where managed is true. outpost-arn - The Amazon Resource Name (ARN) of the Outpost. owner-id - The Amazon Web Services account ID of the instance owner. placement-group-name - The name of the placement group for the instance. 
placement-partition-number - The partition in which the instance is located. platform - The platform. To list only Windows instances, use windows. platform-details - The platform (Linux/UNIX | Red Hat BYOL Linux | Red Hat Enterprise Linux | Red Hat Enterprise Linux with HA | Red Hat Enterprise Linux with SQL Server Standard and HA | Red Hat Enterprise Linux with SQL Server Enterprise and HA | Red Hat Enterprise Linux with SQL Server Standard | Red Hat Enterprise Linux with SQL Server Web | Red Hat Enterprise Linux with SQL Server Enterprise | SQL Server Enterprise | SQL Server Standard | SQL Server Web | SUSE Linux | Ubuntu Pro | Windows | Windows BYOL | Windows with SQL Server Enterprise | Windows with SQL Server Standard | Windows with SQL Server Web). private-dns-name - The private IPv4 DNS name of the instance. private-dns-name-options.enable-resource-name-dns-a-record - A Boolean that indicates whether to respond to DNS queries for instance hostnames with DNS A records. private-dns-name-options.enable-resource-name-dns-aaaa-record - A Boolean that indicates whether to respond to DNS queries for instance hostnames with DNS AAAA records. private-dns-name-options.hostname-type - The type of hostname (ip-name | resource-name). private-ip-address - The private IPv4 address of the instance. This can only be used to filter by the primary IP address of the network interface attached to the instance. To filter by additional IP addresses assigned to the network interface, use the filter network-interface.addresses.private-ip-address. product-code - The product code associated with the AMI used to launch the instance. product-code.type - The type of product code (devpay | marketplace). ramdisk-id - The RAM disk ID. reason - The reason for the current state of the instance (for example, shows "User Initiated [date]" when you stop or terminate the instance). Similar to the state-reason-code filter. 
requester-id - The ID of the entity that launched the instance on your behalf (for example, Amazon Web Services Management Console, Auto Scaling, and so on). reservation-id - The ID of the instance's reservation. A reservation ID is created any time you launch an instance. A reservation ID has a one-to-one relationship with an instance launch request, but can be associated with more than one instance if you launch multiple instances using the same launch request. For example, if you launch one instance, you get one reservation ID. If you launch ten instances using the same launch request, you also get one reservation ID. root-device-name - The device name of the root device volume (for example, /dev/sda1). root-device-type - The type of the root device volume (ebs | instance-store). source-dest-check - Indicates whether the instance performs source/destination checking. A value of true means that checking is enabled, and false means that checking is disabled. The value must be false for the instance to perform network address translation (NAT) in your VPC. spot-instance-request-id - The ID of the Spot Instance request. state-reason-code - The reason code for the state change. state-reason-message - A message that describes the state change. subnet-id - The ID of the subnet for the instance. tag: - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key Owner and the value TeamA, specify tag:Owner for the filter name and TeamA for the filter value. tag-key - The key of a tag assigned to the resource. Use this filter to find all resources that have a tag with a specific key, regardless of the tag value. tenancy - The tenancy of an instance (dedicated | default | host). tpm-support - Indicates if the instance is configured for NitroTPM support (v2.0). 
usage-operation - The usage operation value for the instance (RunInstances | RunInstances:00g0 | RunInstances:0010 | RunInstances:1010 | RunInstances:1014 | RunInstances:1110 | RunInstances:0014 | RunInstances:0210 | RunInstances:0110 | RunInstances:0100 | RunInstances:0004 | RunInstances:0200 | RunInstances:000g | RunInstances:0g00 | RunInstances:0002 | RunInstances:0800 | RunInstances:0102 | RunInstances:0006 | RunInstances:0202). usage-operation-update-time - The time that the usage operation was last updated, for example, 2022-09-15T17:15:20.000Z. virtualization-type - The virtualization type of the instance (paravirtual | hvm). vpc-id - The ID of the VPC that the instance is running in. @OptionalCustomCoding> public var filters: [Filter]? /// The instance IDs. Default: Describes all your instances. @@ -23500,7 +23546,7 @@ extension EC2 { /// gateway_load_balancer_endpoint | global_accelerator_managed | interface | /// iot_rules_managed | lambda | load_balancer | nat_gateway | /// network_load_balancer | quicksight | transit_gateway | trunk | - /// vpc_endpoint). mac-address - The MAC address of the network interface. network-interface-id - The ID of the network interface. operator.managed - A Boolean that indicates whether this is a managed network interface. operator.principal - The principal that manages the network interface. Only valid for managed network interfaces, where managed is true. owner-id - The Amazon Web Services account ID of the network interface owner. private-dns-name - The private DNS name of the network interface (IPv4). private-ip-address - The private IPv4 address or addresses of the network interface. requester-id - The alias or Amazon Web Services account ID of the principal or service that created the network interface. requester-managed - Indicates whether the network interface is being managed by an Amazon Web Services + /// vpc_endpoint). mac-address - The MAC address of the network interface. 
network-interface-id - The ID of the network interface. owner-id - The Amazon Web Services account ID of the network interface owner. private-dns-name - The private DNS name of the network interface (IPv4). private-ip-address - The private IPv4 address or addresses of the network interface. requester-id - The alias or Amazon Web Services account ID of the principal or service that created the network interface. requester-managed - Indicates whether the network interface is being managed by an Amazon Web Services /// service (for example, Amazon Web Services Management Console, Auto Scaling, and so on). source-dest-check - Indicates whether the network interface performs source/destination checking. /// A value of true means checking is enabled, and false means checking is disabled. /// The value must be false for the network interface to perform network address translation (NAT) in your VPC. status - The status of the network interface. If the network interface is not attached to an instance, the status is available; @@ -37010,6 +37056,8 @@ extension EC2 { /// The network interfaces for the instance. @OptionalCustomCoding> public var networkInterfaces: [InstanceNetworkInterface]? + /// Contains settings for the network performance options for your instance. + public let networkPerformanceOptions: InstanceNetworkPerformanceOptions? /// The service provider that manages the instance. public let `operator`: OperatorResponse? /// The Amazon Resource Name (ARN) of the Outpost. @@ -37071,7 +37119,7 @@ extension EC2 { public let vpcId: String? @inlinable - public init(amiLaunchIndex: Int? = nil, architecture: ArchitectureValues? = nil, blockDeviceMappings: [InstanceBlockDeviceMapping]? = nil, bootMode: BootModeValues? = nil, capacityReservationId: String? = nil, capacityReservationSpecification: CapacityReservationSpecificationResponse? = nil, clientToken: String? = nil, cpuOptions: CpuOptions? = nil, currentInstanceBootMode: InstanceBootModeValues? 
= nil, ebsOptimized: Bool? = nil, elasticGpuAssociations: [ElasticGpuAssociation]? = nil, elasticInferenceAcceleratorAssociations: [ElasticInferenceAcceleratorAssociation]? = nil, enaSupport: Bool? = nil, enclaveOptions: EnclaveOptions? = nil, hibernationOptions: HibernationOptions? = nil, hypervisor: HypervisorType? = nil, iamInstanceProfile: IamInstanceProfile? = nil, imageId: String? = nil, instanceId: String? = nil, instanceLifecycle: InstanceLifecycleType? = nil, instanceType: InstanceType? = nil, ipv6Address: String? = nil, kernelId: String? = nil, keyName: String? = nil, launchTime: Date? = nil, licenses: [LicenseConfiguration]? = nil, maintenanceOptions: InstanceMaintenanceOptions? = nil, metadataOptions: InstanceMetadataOptionsResponse? = nil, monitoring: Monitoring? = nil, networkInterfaces: [InstanceNetworkInterface]? = nil, operator: OperatorResponse? = nil, outpostArn: String? = nil, placement: Placement? = nil, platform: PlatformValues? = nil, platformDetails: String? = nil, privateDnsName: String? = nil, privateDnsNameOptions: PrivateDnsNameOptionsResponse? = nil, privateIpAddress: String? = nil, productCodes: [ProductCode]? = nil, publicDnsName: String? = nil, publicIpAddress: String? = nil, ramdiskId: String? = nil, rootDeviceName: String? = nil, rootDeviceType: DeviceType? = nil, securityGroups: [GroupIdentifier]? = nil, sourceDestCheck: Bool? = nil, spotInstanceRequestId: String? = nil, sriovNetSupport: String? = nil, state: InstanceState? = nil, stateReason: StateReason? = nil, stateTransitionReason: String? = nil, subnetId: String? = nil, tags: [Tag]? = nil, tpmSupport: String? = nil, usageOperation: String? = nil, usageOperationUpdateTime: Date? = nil, virtualizationType: VirtualizationType? = nil, vpcId: String? = nil) { + public init(amiLaunchIndex: Int? = nil, architecture: ArchitectureValues? = nil, blockDeviceMappings: [InstanceBlockDeviceMapping]? = nil, bootMode: BootModeValues? = nil, capacityReservationId: String? 
= nil, capacityReservationSpecification: CapacityReservationSpecificationResponse? = nil, clientToken: String? = nil, cpuOptions: CpuOptions? = nil, currentInstanceBootMode: InstanceBootModeValues? = nil, ebsOptimized: Bool? = nil, elasticGpuAssociations: [ElasticGpuAssociation]? = nil, elasticInferenceAcceleratorAssociations: [ElasticInferenceAcceleratorAssociation]? = nil, enaSupport: Bool? = nil, enclaveOptions: EnclaveOptions? = nil, hibernationOptions: HibernationOptions? = nil, hypervisor: HypervisorType? = nil, iamInstanceProfile: IamInstanceProfile? = nil, imageId: String? = nil, instanceId: String? = nil, instanceLifecycle: InstanceLifecycleType? = nil, instanceType: InstanceType? = nil, ipv6Address: String? = nil, kernelId: String? = nil, keyName: String? = nil, launchTime: Date? = nil, licenses: [LicenseConfiguration]? = nil, maintenanceOptions: InstanceMaintenanceOptions? = nil, metadataOptions: InstanceMetadataOptionsResponse? = nil, monitoring: Monitoring? = nil, networkInterfaces: [InstanceNetworkInterface]? = nil, networkPerformanceOptions: InstanceNetworkPerformanceOptions? = nil, operator: OperatorResponse? = nil, outpostArn: String? = nil, placement: Placement? = nil, platform: PlatformValues? = nil, platformDetails: String? = nil, privateDnsName: String? = nil, privateDnsNameOptions: PrivateDnsNameOptionsResponse? = nil, privateIpAddress: String? = nil, productCodes: [ProductCode]? = nil, publicDnsName: String? = nil, publicIpAddress: String? = nil, ramdiskId: String? = nil, rootDeviceName: String? = nil, rootDeviceType: DeviceType? = nil, securityGroups: [GroupIdentifier]? = nil, sourceDestCheck: Bool? = nil, spotInstanceRequestId: String? = nil, sriovNetSupport: String? = nil, state: InstanceState? = nil, stateReason: StateReason? = nil, stateTransitionReason: String? = nil, subnetId: String? = nil, tags: [Tag]? = nil, tpmSupport: String? = nil, usageOperation: String? = nil, usageOperationUpdateTime: Date? 
= nil, virtualizationType: VirtualizationType? = nil, vpcId: String? = nil) { self.amiLaunchIndex = amiLaunchIndex self.architecture = architecture self.blockDeviceMappings = blockDeviceMappings @@ -37102,6 +37150,7 @@ extension EC2 { self.metadataOptions = metadataOptions self.monitoring = monitoring self.networkInterfaces = networkInterfaces + self.networkPerformanceOptions = networkPerformanceOptions self.`operator` = `operator` self.outpostArn = outpostArn self.placement = placement @@ -37163,6 +37212,7 @@ extension EC2 { case metadataOptions = "metadataOptions" case monitoring = "monitoring" case networkInterfaces = "networkInterfaceSet" + case networkPerformanceOptions = "networkPerformanceOptions" case `operator` = "operator" case outpostArn = "outpostArn" case placement = "placement" @@ -38232,6 +38282,34 @@ extension EC2 { } } + public struct InstanceNetworkPerformanceOptions: AWSDecodableShape { + /// When you configure network bandwidth weighting, you can boost your baseline bandwidth for either networking or EBS by up to 25%. The total available baseline bandwidth for your instance remains the same. The default option uses the standard bandwidth configuration for your instance type. + public let bandwidthWeighting: InstanceBandwidthWeighting? + + @inlinable + public init(bandwidthWeighting: InstanceBandwidthWeighting? = nil) { + self.bandwidthWeighting = bandwidthWeighting + } + + private enum CodingKeys: String, CodingKey { + case bandwidthWeighting = "bandwidthWeighting" + } + } + + public struct InstanceNetworkPerformanceOptionsRequest: AWSEncodableShape { + /// Specify the bandwidth weighting option to boost the associated type of baseline bandwidth, as follows: default This option uses the standard bandwidth configuration for your instance type. vpc-1 This option boosts your networking baseline bandwidth and reduces your EBS baseline bandwidth. ebs-1 This option boosts your EBS baseline bandwidth and reduces your networking baseline bandwidth. 
+ public let bandwidthWeighting: InstanceBandwidthWeighting? + + @inlinable + public init(bandwidthWeighting: InstanceBandwidthWeighting? = nil) { + self.bandwidthWeighting = bandwidthWeighting + } + + private enum CodingKeys: String, CodingKey { + case bandwidthWeighting = "BandwidthWeighting" + } + } + public struct InstancePrivateIpAddress: AWSDecodableShape { /// The association information for an Elastic IP address for the network interface. public let association: InstanceNetworkInterfaceAssociation? @@ -41436,6 +41514,34 @@ extension EC2 { } } + public struct LaunchTemplateNetworkPerformanceOptions: AWSDecodableShape { + /// When you configure network bandwidth weighting, you can boost baseline bandwidth for either networking or EBS by up to 25%. The total available baseline bandwidth for your instance remains the same. The default option uses the standard bandwidth configuration for your instance type. + public let bandwidthWeighting: InstanceBandwidthWeighting? + + @inlinable + public init(bandwidthWeighting: InstanceBandwidthWeighting? = nil) { + self.bandwidthWeighting = bandwidthWeighting + } + + private enum CodingKeys: String, CodingKey { + case bandwidthWeighting = "bandwidthWeighting" + } + } + + public struct LaunchTemplateNetworkPerformanceOptionsRequest: AWSEncodableShape { + /// Specify the bandwidth weighting option to boost the associated type of baseline bandwidth, as follows: default This option uses the standard bandwidth configuration for your instance type. vpc-1 This option boosts your networking baseline bandwidth and reduces your EBS baseline bandwidth. ebs-1 This option boosts your EBS baseline bandwidth and reduces your networking baseline bandwidth. + public let bandwidthWeighting: InstanceBandwidthWeighting? + + @inlinable + public init(bandwidthWeighting: InstanceBandwidthWeighting? 
= nil) { + self.bandwidthWeighting = bandwidthWeighting + } + + private enum CodingKeys: String, CodingKey { + case bandwidthWeighting = "BandwidthWeighting" + } + } + public struct LaunchTemplateOverrides: AWSEncodableShape & AWSDecodableShape { /// The Availability Zone in which to launch the instances. public let availabilityZone: String? @@ -43857,6 +43963,46 @@ extension EC2 { } } + public struct ModifyInstanceNetworkPerformanceRequest: AWSEncodableShape { + /// Specify the bandwidth weighting option to boost the associated type of baseline bandwidth, as follows: default This option uses the standard bandwidth configuration for your instance type. vpc-1 This option boosts your networking baseline bandwidth and reduces your EBS baseline bandwidth. ebs-1 This option boosts your EBS baseline bandwidth and reduces your networking baseline bandwidth. + public let bandwidthWeighting: InstanceBandwidthWeighting? + /// Checks whether you have the required permissions for the operation, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. + public let dryRun: Bool? + /// The ID of the instance to update. + public let instanceId: String? + + @inlinable + public init(bandwidthWeighting: InstanceBandwidthWeighting? = nil, dryRun: Bool? = nil, instanceId: String? = nil) { + self.bandwidthWeighting = bandwidthWeighting + self.dryRun = dryRun + self.instanceId = instanceId + } + + private enum CodingKeys: String, CodingKey { + case bandwidthWeighting = "BandwidthWeighting" + case dryRun = "DryRun" + case instanceId = "InstanceId" + } + } + + public struct ModifyInstanceNetworkPerformanceResult: AWSDecodableShape { + /// Contains the updated configuration for bandwidth weighting on the specified instance. + public let bandwidthWeighting: InstanceBandwidthWeighting? + /// The instance ID that was updated. + public let instanceId: String? 
+ + @inlinable + public init(bandwidthWeighting: InstanceBandwidthWeighting? = nil, instanceId: String? = nil) { + self.bandwidthWeighting = bandwidthWeighting + self.instanceId = instanceId + } + + private enum CodingKeys: String, CodingKey { + case bandwidthWeighting = "bandwidthWeighting" + case instanceId = "instanceId" + } + } + public struct ModifyInstancePlacementRequest: AWSEncodableShape { /// The affinity setting for the instance. For more information, see Host affinity in the Amazon EC2 User Guide. public let affinity: Affinity? @@ -47116,8 +47262,12 @@ extension EC2 { } public struct NetworkInfo: AWSDecodableShape { + public struct _BandwidthWeightingsEncoding: ArrayCoderProperties { public static let member = "item" } public struct _NetworkCardsEncoding: ArrayCoderProperties { public static let member = "item" } + /// A list of valid settings for configurable bandwidth weighting for the instance type, if supported. + @OptionalCustomCoding> + public var bandwidthWeightings: [BandwidthWeightingType]? /// The index of the default network card, starting at 0. public let defaultNetworkCardIndex: Int? /// Describes the Elastic Fabric Adapters for the instance type. @@ -47147,7 +47297,8 @@ extension EC2 { public let networkPerformance: String? @inlinable - public init(defaultNetworkCardIndex: Int? = nil, efaInfo: EfaInfo? = nil, efaSupported: Bool? = nil, enaSrdSupported: Bool? = nil, enaSupport: EnaSupport? = nil, encryptionInTransitSupported: Bool? = nil, ipv4AddressesPerInterface: Int? = nil, ipv6AddressesPerInterface: Int? = nil, ipv6Supported: Bool? = nil, maximumNetworkCards: Int? = nil, maximumNetworkInterfaces: Int? = nil, networkCards: [NetworkCardInfo]? = nil, networkPerformance: String? = nil) { + public init(bandwidthWeightings: [BandwidthWeightingType]? = nil, defaultNetworkCardIndex: Int? = nil, efaInfo: EfaInfo? = nil, efaSupported: Bool? = nil, enaSrdSupported: Bool? = nil, enaSupport: EnaSupport? = nil, encryptionInTransitSupported: Bool? 
= nil, ipv4AddressesPerInterface: Int? = nil, ipv6AddressesPerInterface: Int? = nil, ipv6Supported: Bool? = nil, maximumNetworkCards: Int? = nil, maximumNetworkInterfaces: Int? = nil, networkCards: [NetworkCardInfo]? = nil, networkPerformance: String? = nil) { + self.bandwidthWeightings = bandwidthWeightings self.defaultNetworkCardIndex = defaultNetworkCardIndex self.efaInfo = efaInfo self.efaSupported = efaSupported @@ -47164,6 +47315,7 @@ extension EC2 { } private enum CodingKeys: String, CodingKey { + case bandwidthWeightings = "bandwidthWeightings" case defaultNetworkCardIndex = "defaultNetworkCardIndex" case efaInfo = "efaInfo" case efaSupported = "efaSupported" @@ -51122,6 +51274,8 @@ extension EC2 { /// The network interfaces for the instance. @OptionalCustomCoding> public var networkInterfaces: [LaunchTemplateInstanceNetworkInterfaceSpecificationRequest]? + /// Contains launch template settings to boost network performance for the type of workload that runs on your instance. + public let networkPerformanceOptions: LaunchTemplateNetworkPerformanceOptionsRequest? /// The entity that manages the launch template. public let `operator`: OperatorRequest? /// The placement for the instance. @@ -51143,7 +51297,7 @@ extension EC2 { public let userData: String? @inlinable - public init(blockDeviceMappings: [LaunchTemplateBlockDeviceMappingRequest]? = nil, capacityReservationSpecification: LaunchTemplateCapacityReservationSpecificationRequest? = nil, cpuOptions: LaunchTemplateCpuOptionsRequest? = nil, creditSpecification: CreditSpecificationRequest? = nil, disableApiStop: Bool? = nil, disableApiTermination: Bool? = nil, ebsOptimized: Bool? = nil, elasticGpuSpecifications: [ElasticGpuSpecification]? = nil, elasticInferenceAccelerators: [LaunchTemplateElasticInferenceAccelerator]? = nil, enclaveOptions: LaunchTemplateEnclaveOptionsRequest? = nil, hibernationOptions: LaunchTemplateHibernationOptionsRequest? 
= nil, iamInstanceProfile: LaunchTemplateIamInstanceProfileSpecificationRequest? = nil, imageId: String? = nil, instanceInitiatedShutdownBehavior: ShutdownBehavior? = nil, instanceMarketOptions: LaunchTemplateInstanceMarketOptionsRequest? = nil, instanceRequirements: InstanceRequirementsRequest? = nil, instanceType: InstanceType? = nil, kernelId: String? = nil, keyName: String? = nil, licenseSpecifications: [LaunchTemplateLicenseConfigurationRequest]? = nil, maintenanceOptions: LaunchTemplateInstanceMaintenanceOptionsRequest? = nil, metadataOptions: LaunchTemplateInstanceMetadataOptionsRequest? = nil, monitoring: LaunchTemplatesMonitoringRequest? = nil, networkInterfaces: [LaunchTemplateInstanceNetworkInterfaceSpecificationRequest]? = nil, operator: OperatorRequest? = nil, placement: LaunchTemplatePlacementRequest? = nil, privateDnsNameOptions: LaunchTemplatePrivateDnsNameOptionsRequest? = nil, ramDiskId: String? = nil, securityGroupIds: [String]? = nil, securityGroups: [String]? = nil, tagSpecifications: [LaunchTemplateTagSpecificationRequest]? = nil, userData: String? = nil) { + public init(blockDeviceMappings: [LaunchTemplateBlockDeviceMappingRequest]? = nil, capacityReservationSpecification: LaunchTemplateCapacityReservationSpecificationRequest? = nil, cpuOptions: LaunchTemplateCpuOptionsRequest? = nil, creditSpecification: CreditSpecificationRequest? = nil, disableApiStop: Bool? = nil, disableApiTermination: Bool? = nil, ebsOptimized: Bool? = nil, elasticGpuSpecifications: [ElasticGpuSpecification]? = nil, elasticInferenceAccelerators: [LaunchTemplateElasticInferenceAccelerator]? = nil, enclaveOptions: LaunchTemplateEnclaveOptionsRequest? = nil, hibernationOptions: LaunchTemplateHibernationOptionsRequest? = nil, iamInstanceProfile: LaunchTemplateIamInstanceProfileSpecificationRequest? = nil, imageId: String? = nil, instanceInitiatedShutdownBehavior: ShutdownBehavior? = nil, instanceMarketOptions: LaunchTemplateInstanceMarketOptionsRequest? 
= nil, instanceRequirements: InstanceRequirementsRequest? = nil, instanceType: InstanceType? = nil, kernelId: String? = nil, keyName: String? = nil, licenseSpecifications: [LaunchTemplateLicenseConfigurationRequest]? = nil, maintenanceOptions: LaunchTemplateInstanceMaintenanceOptionsRequest? = nil, metadataOptions: LaunchTemplateInstanceMetadataOptionsRequest? = nil, monitoring: LaunchTemplatesMonitoringRequest? = nil, networkInterfaces: [LaunchTemplateInstanceNetworkInterfaceSpecificationRequest]? = nil, networkPerformanceOptions: LaunchTemplateNetworkPerformanceOptionsRequest? = nil, operator: OperatorRequest? = nil, placement: LaunchTemplatePlacementRequest? = nil, privateDnsNameOptions: LaunchTemplatePrivateDnsNameOptionsRequest? = nil, ramDiskId: String? = nil, securityGroupIds: [String]? = nil, securityGroups: [String]? = nil, tagSpecifications: [LaunchTemplateTagSpecificationRequest]? = nil, userData: String? = nil) { self.blockDeviceMappings = blockDeviceMappings self.capacityReservationSpecification = capacityReservationSpecification self.cpuOptions = cpuOptions @@ -51168,6 +51322,7 @@ extension EC2 { self.metadataOptions = metadataOptions self.monitoring = monitoring self.networkInterfaces = networkInterfaces + self.networkPerformanceOptions = networkPerformanceOptions self.`operator` = `operator` self.placement = placement self.privateDnsNameOptions = privateDnsNameOptions @@ -51210,6 +51365,7 @@ extension EC2 { case metadataOptions = "MetadataOptions" case monitoring = "Monitoring" case networkInterfaces = "NetworkInterface" + case networkPerformanceOptions = "NetworkPerformanceOptions" case `operator` = "Operator" case placement = "Placement" case privateDnsNameOptions = "PrivateDnsNameOptions" @@ -52233,6 +52389,8 @@ extension EC2 { /// The network interfaces. @OptionalCustomCoding> public var networkInterfaces: [LaunchTemplateInstanceNetworkInterfaceSpecification]? 
+ /// Contains the launch template settings for network performance options for your instance. + public let networkPerformanceOptions: LaunchTemplateNetworkPerformanceOptions? /// The entity that manages the launch template. public let `operator`: OperatorResponse? /// The placement of the instance. @@ -52254,7 +52412,7 @@ extension EC2 { public let userData: String? @inlinable - public init(blockDeviceMappings: [LaunchTemplateBlockDeviceMapping]? = nil, capacityReservationSpecification: LaunchTemplateCapacityReservationSpecificationResponse? = nil, cpuOptions: LaunchTemplateCpuOptions? = nil, creditSpecification: CreditSpecification? = nil, disableApiStop: Bool? = nil, disableApiTermination: Bool? = nil, ebsOptimized: Bool? = nil, elasticGpuSpecifications: [ElasticGpuSpecificationResponse]? = nil, elasticInferenceAccelerators: [LaunchTemplateElasticInferenceAcceleratorResponse]? = nil, enclaveOptions: LaunchTemplateEnclaveOptions? = nil, hibernationOptions: LaunchTemplateHibernationOptions? = nil, iamInstanceProfile: LaunchTemplateIamInstanceProfileSpecification? = nil, imageId: String? = nil, instanceInitiatedShutdownBehavior: ShutdownBehavior? = nil, instanceMarketOptions: LaunchTemplateInstanceMarketOptions? = nil, instanceRequirements: InstanceRequirements? = nil, instanceType: InstanceType? = nil, kernelId: String? = nil, keyName: String? = nil, licenseSpecifications: [LaunchTemplateLicenseConfiguration]? = nil, maintenanceOptions: LaunchTemplateInstanceMaintenanceOptions? = nil, metadataOptions: LaunchTemplateInstanceMetadataOptions? = nil, monitoring: LaunchTemplatesMonitoring? = nil, networkInterfaces: [LaunchTemplateInstanceNetworkInterfaceSpecification]? = nil, operator: OperatorResponse? = nil, placement: LaunchTemplatePlacement? = nil, privateDnsNameOptions: LaunchTemplatePrivateDnsNameOptions? = nil, ramDiskId: String? = nil, securityGroupIds: [String]? = nil, securityGroups: [String]? = nil, tagSpecifications: [LaunchTemplateTagSpecification]? 
= nil, userData: String? = nil) { + public init(blockDeviceMappings: [LaunchTemplateBlockDeviceMapping]? = nil, capacityReservationSpecification: LaunchTemplateCapacityReservationSpecificationResponse? = nil, cpuOptions: LaunchTemplateCpuOptions? = nil, creditSpecification: CreditSpecification? = nil, disableApiStop: Bool? = nil, disableApiTermination: Bool? = nil, ebsOptimized: Bool? = nil, elasticGpuSpecifications: [ElasticGpuSpecificationResponse]? = nil, elasticInferenceAccelerators: [LaunchTemplateElasticInferenceAcceleratorResponse]? = nil, enclaveOptions: LaunchTemplateEnclaveOptions? = nil, hibernationOptions: LaunchTemplateHibernationOptions? = nil, iamInstanceProfile: LaunchTemplateIamInstanceProfileSpecification? = nil, imageId: String? = nil, instanceInitiatedShutdownBehavior: ShutdownBehavior? = nil, instanceMarketOptions: LaunchTemplateInstanceMarketOptions? = nil, instanceRequirements: InstanceRequirements? = nil, instanceType: InstanceType? = nil, kernelId: String? = nil, keyName: String? = nil, licenseSpecifications: [LaunchTemplateLicenseConfiguration]? = nil, maintenanceOptions: LaunchTemplateInstanceMaintenanceOptions? = nil, metadataOptions: LaunchTemplateInstanceMetadataOptions? = nil, monitoring: LaunchTemplatesMonitoring? = nil, networkInterfaces: [LaunchTemplateInstanceNetworkInterfaceSpecification]? = nil, networkPerformanceOptions: LaunchTemplateNetworkPerformanceOptions? = nil, operator: OperatorResponse? = nil, placement: LaunchTemplatePlacement? = nil, privateDnsNameOptions: LaunchTemplatePrivateDnsNameOptions? = nil, ramDiskId: String? = nil, securityGroupIds: [String]? = nil, securityGroups: [String]? = nil, tagSpecifications: [LaunchTemplateTagSpecification]? = nil, userData: String? 
= nil) { self.blockDeviceMappings = blockDeviceMappings self.capacityReservationSpecification = capacityReservationSpecification self.cpuOptions = cpuOptions @@ -52279,6 +52437,7 @@ extension EC2 { self.metadataOptions = metadataOptions self.monitoring = monitoring self.networkInterfaces = networkInterfaces + self.networkPerformanceOptions = networkPerformanceOptions self.`operator` = `operator` self.placement = placement self.privateDnsNameOptions = privateDnsNameOptions @@ -52314,6 +52473,7 @@ extension EC2 { case metadataOptions = "metadataOptions" case monitoring = "monitoring" case networkInterfaces = "networkInterfaceSet" + case networkPerformanceOptions = "networkPerformanceOptions" case `operator` = "operator" case placement = "placement" case privateDnsNameOptions = "privateDnsNameOptions" @@ -53156,6 +53316,8 @@ extension EC2 { /// The network interfaces to associate with the instance. @OptionalCustomCoding> public var networkInterfaces: [InstanceNetworkInterfaceSpecification]? + /// Contains settings for the network performance options for the instance. + public let networkPerformanceOptions: InstanceNetworkPerformanceOptionsRequest? /// Reserved for internal use. public let `operator`: OperatorRequest? /// The placement for the instance. @@ -53181,7 +53343,7 @@ extension EC2 { public let userData: String? @inlinable - public init(additionalInfo: String? = nil, blockDeviceMappings: [BlockDeviceMapping]? = nil, capacityReservationSpecification: CapacityReservationSpecification? = nil, clientToken: String? = RunInstancesRequest.idempotencyToken(), cpuOptions: CpuOptionsRequest? = nil, creditSpecification: CreditSpecificationRequest? = nil, disableApiStop: Bool? = nil, disableApiTermination: Bool? = nil, dryRun: Bool? = nil, ebsOptimized: Bool? = nil, elasticGpuSpecification: [ElasticGpuSpecification]? = nil, elasticInferenceAccelerators: [ElasticInferenceAccelerator]? = nil, enablePrimaryIpv6: Bool? = nil, enclaveOptions: EnclaveOptionsRequest? 
= nil, hibernationOptions: HibernationOptionsRequest? = nil, iamInstanceProfile: IamInstanceProfileSpecification? = nil, imageId: String? = nil, instanceInitiatedShutdownBehavior: ShutdownBehavior? = nil, instanceMarketOptions: InstanceMarketOptionsRequest? = nil, instanceType: InstanceType? = nil, ipv6AddressCount: Int? = nil, ipv6Addresses: [InstanceIpv6Address]? = nil, kernelId: String? = nil, keyName: String? = nil, launchTemplate: LaunchTemplateSpecification? = nil, licenseSpecifications: [LicenseConfigurationRequest]? = nil, maintenanceOptions: InstanceMaintenanceOptionsRequest? = nil, maxCount: Int? = nil, metadataOptions: InstanceMetadataOptionsRequest? = nil, minCount: Int? = nil, monitoring: RunInstancesMonitoringEnabled? = nil, networkInterfaces: [InstanceNetworkInterfaceSpecification]? = nil, operator: OperatorRequest? = nil, placement: Placement? = nil, privateDnsNameOptions: PrivateDnsNameOptionsRequest? = nil, privateIpAddress: String? = nil, ramdiskId: String? = nil, securityGroupIds: [String]? = nil, securityGroups: [String]? = nil, subnetId: String? = nil, tagSpecifications: [TagSpecification]? = nil, userData: String? = nil) { + public init(additionalInfo: String? = nil, blockDeviceMappings: [BlockDeviceMapping]? = nil, capacityReservationSpecification: CapacityReservationSpecification? = nil, clientToken: String? = RunInstancesRequest.idempotencyToken(), cpuOptions: CpuOptionsRequest? = nil, creditSpecification: CreditSpecificationRequest? = nil, disableApiStop: Bool? = nil, disableApiTermination: Bool? = nil, dryRun: Bool? = nil, ebsOptimized: Bool? = nil, elasticGpuSpecification: [ElasticGpuSpecification]? = nil, elasticInferenceAccelerators: [ElasticInferenceAccelerator]? = nil, enablePrimaryIpv6: Bool? = nil, enclaveOptions: EnclaveOptionsRequest? = nil, hibernationOptions: HibernationOptionsRequest? = nil, iamInstanceProfile: IamInstanceProfileSpecification? = nil, imageId: String? = nil, instanceInitiatedShutdownBehavior: ShutdownBehavior? 
= nil, instanceMarketOptions: InstanceMarketOptionsRequest? = nil, instanceType: InstanceType? = nil, ipv6AddressCount: Int? = nil, ipv6Addresses: [InstanceIpv6Address]? = nil, kernelId: String? = nil, keyName: String? = nil, launchTemplate: LaunchTemplateSpecification? = nil, licenseSpecifications: [LicenseConfigurationRequest]? = nil, maintenanceOptions: InstanceMaintenanceOptionsRequest? = nil, maxCount: Int? = nil, metadataOptions: InstanceMetadataOptionsRequest? = nil, minCount: Int? = nil, monitoring: RunInstancesMonitoringEnabled? = nil, networkInterfaces: [InstanceNetworkInterfaceSpecification]? = nil, networkPerformanceOptions: InstanceNetworkPerformanceOptionsRequest? = nil, operator: OperatorRequest? = nil, placement: Placement? = nil, privateDnsNameOptions: PrivateDnsNameOptionsRequest? = nil, privateIpAddress: String? = nil, ramdiskId: String? = nil, securityGroupIds: [String]? = nil, securityGroups: [String]? = nil, subnetId: String? = nil, tagSpecifications: [TagSpecification]? = nil, userData: String? 
= nil) { self.additionalInfo = additionalInfo self.blockDeviceMappings = blockDeviceMappings self.capacityReservationSpecification = capacityReservationSpecification @@ -53214,6 +53376,7 @@ extension EC2 { self.minCount = minCount self.monitoring = monitoring self.networkInterfaces = networkInterfaces + self.networkPerformanceOptions = networkPerformanceOptions self.`operator` = `operator` self.placement = placement self.privateDnsNameOptions = privateDnsNameOptions @@ -53265,6 +53428,7 @@ extension EC2 { case minCount = "MinCount" case monitoring = "Monitoring" case networkInterfaces = "networkInterface" + case networkPerformanceOptions = "NetworkPerformanceOptions" case `operator` = "Operator" case placement = "Placement" case privateDnsNameOptions = "PrivateDnsNameOptions" @@ -54586,6 +54750,8 @@ extension EC2 { public struct Snapshot: AWSDecodableShape { public struct _TagsEncoding: ArrayCoderProperties { public static let member = "item" } + /// The Availability Zone or Local Zone of the snapshot. For example, us-west-1a (Availability Zone) or us-west-2-lax-1a (Local Zone). + public let availabilityZone: String? /// Only for snapshot copies created with time-based snapshot copy operations. The completion duration requested for the time-based snapshot copy operation. public let completionDurationMinutes: Int? /// The time stamp when the snapshot was completed. @@ -54631,7 +54797,8 @@ extension EC2 { public let volumeSize: Int? @inlinable - public init(completionDurationMinutes: Int? = nil, completionTime: Date? = nil, dataEncryptionKeyId: String? = nil, description: String? = nil, encrypted: Bool? = nil, kmsKeyId: String? = nil, outpostArn: String? = nil, ownerAlias: String? = nil, ownerId: String? = nil, progress: String? = nil, restoreExpiryTime: Date? = nil, snapshotId: String? = nil, sseType: SSEType? = nil, startTime: Date? = nil, state: SnapshotState? = nil, stateMessage: String? = nil, storageTier: StorageTier? = nil, tags: [Tag]? 
= nil, transferType: TransferType? = nil, volumeId: String? = nil, volumeSize: Int? = nil) { + public init(availabilityZone: String? = nil, completionDurationMinutes: Int? = nil, completionTime: Date? = nil, dataEncryptionKeyId: String? = nil, description: String? = nil, encrypted: Bool? = nil, kmsKeyId: String? = nil, outpostArn: String? = nil, ownerAlias: String? = nil, ownerId: String? = nil, progress: String? = nil, restoreExpiryTime: Date? = nil, snapshotId: String? = nil, sseType: SSEType? = nil, startTime: Date? = nil, state: SnapshotState? = nil, stateMessage: String? = nil, storageTier: StorageTier? = nil, tags: [Tag]? = nil, transferType: TransferType? = nil, volumeId: String? = nil, volumeSize: Int? = nil) { + self.availabilityZone = availabilityZone self.completionDurationMinutes = completionDurationMinutes self.completionTime = completionTime self.dataEncryptionKeyId = dataEncryptionKeyId @@ -54656,6 +54823,7 @@ extension EC2 { } private enum CodingKeys: String, CodingKey { + case availabilityZone = "availabilityZone" case completionDurationMinutes = "completionDurationMinutes" case completionTime = "completionTime" case dataEncryptionKeyId = "dataEncryptionKeyId" @@ -54759,6 +54927,8 @@ extension EC2 { public struct SnapshotInfo: AWSDecodableShape { public struct _TagsEncoding: ArrayCoderProperties { public static let member = "item" } + /// The Availability Zone or Local Zone of the snapshots. For example, us-west-1a (Availability Zone) or us-west-2-lax-1a (Local Zone). + public let availabilityZone: String? /// Description specified by the CreateSnapshotRequest that has been applied to all snapshots. public let description: String? /// Indicates whether the snapshot is encrypted. @@ -54786,7 +54956,8 @@ extension EC2 { public let volumeSize: Int? @inlinable - public init(description: String? = nil, encrypted: Bool? = nil, outpostArn: String? = nil, ownerId: String? = nil, progress: String? = nil, snapshotId: String? = nil, sseType: SSEType? 
= nil, startTime: Date? = nil, state: SnapshotState? = nil, tags: [Tag]? = nil, volumeId: String? = nil, volumeSize: Int? = nil) { + public init(availabilityZone: String? = nil, description: String? = nil, encrypted: Bool? = nil, outpostArn: String? = nil, ownerId: String? = nil, progress: String? = nil, snapshotId: String? = nil, sseType: SSEType? = nil, startTime: Date? = nil, state: SnapshotState? = nil, tags: [Tag]? = nil, volumeId: String? = nil, volumeSize: Int? = nil) { + self.availabilityZone = availabilityZone self.description = description self.encrypted = encrypted self.outpostArn = outpostArn @@ -54802,6 +54973,7 @@ extension EC2 { } private enum CodingKeys: String, CodingKey { + case availabilityZone = "availabilityZone" case description = "description" case encrypted = "encrypted" case outpostArn = "outpostArn" @@ -55728,7 +55900,7 @@ extension EC2 { /// Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. public let dryRun: Bool? - /// The name of the S3 bucket where the report will be saved. + /// The name of the S3 bucket where the report will be saved. The bucket must be in the same Region where the report generation request is made. public let s3Bucket: String? /// The prefix for your S3 object. public let s3Prefix: String? diff --git a/Sources/Soto/Services/ECS/ECS_api.swift b/Sources/Soto/Services/ECS/ECS_api.swift index 76a4f31ad2..9adcb4d539 100644 --- a/Sources/Soto/Services/ECS/ECS_api.swift +++ b/Sources/Soto/Services/ECS/ECS_api.swift @@ -25,15 +25,16 @@ import Foundation /// Service object for interacting with AWS ECS service. /// -/// Amazon Elastic Container Service Amazon Elastic Container Service (Amazon ECS) is a highly scalable, fast, container management service. It makes it easy to run, -/// stop, and manage Docker containers. 
You can host your cluster on a serverless infrastructure that's -/// managed by Amazon ECS by launching your services or tasks on Fargate. For more control, you can host your -/// tasks on a cluster of Amazon Elastic Compute Cloud (Amazon EC2) or External (on-premises) instances that you manage. Amazon ECS makes it easy to launch and stop container-based applications with simple API calls. This makes -/// it easy to get the state of your cluster from a centralized service, and gives you access to many -/// familiar Amazon EC2 features. You can use Amazon ECS to schedule the placement of containers across your cluster based on your resource -/// needs, isolation policies, and availability requirements. With Amazon ECS, you don't need to operate your -/// own cluster management and configuration management systems. You also don't need to worry about scaling -/// your management infrastructure. +/// Amazon Elastic Container Service Amazon Elastic Container Service (Amazon ECS) is a highly scalable, fast, container management service. It makes +/// it easy to run, stop, and manage Docker containers. You can host your cluster on a +/// serverless infrastructure that's managed by Amazon ECS by launching your services or tasks on +/// Fargate. For more control, you can host your tasks on a cluster of Amazon Elastic Compute Cloud (Amazon EC2) +/// or External (on-premises) instances that you manage. Amazon ECS makes it easy to launch and stop container-based applications with simple API +/// calls. This makes it easy to get the state of your cluster from a centralized service, +/// and gives you access to many familiar Amazon EC2 features. You can use Amazon ECS to schedule the placement of containers across your cluster based on +/// your resource needs, isolation policies, and availability requirements. With Amazon ECS, you +/// don't need to operate your own cluster management and configuration management systems. 
+/// You also don't need to worry about scaling your management infrastructure. public struct ECS: AWSService { // MARK: Member variables @@ -101,10 +102,12 @@ public struct ECS: AWSService { // MARK: API Calls - /// Creates a new capacity provider. Capacity providers are associated with an Amazon ECS cluster and are used - /// in capacity provider strategies to facilitate cluster auto scaling. Only capacity providers that use an Auto Scaling group can be created. Amazon ECS tasks on Fargate use - /// the FARGATE and FARGATE_SPOT capacity providers. These providers are - /// available to all accounts in the Amazon Web Services Regions that Fargate supports. + /// Creates a new capacity provider. Capacity providers are associated with an Amazon ECS + /// cluster and are used in capacity provider strategies to facilitate cluster auto + /// scaling. Only capacity providers that use an Auto Scaling group can be created. Amazon ECS tasks on + /// Fargate use the FARGATE and FARGATE_SPOT capacity providers. + /// These providers are available to all accounts in the Amazon Web Services Regions that Fargate + /// supports. @Sendable @inlinable public func createCapacityProvider(_ input: CreateCapacityProviderRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateCapacityProviderResponse { @@ -117,15 +120,17 @@ public struct ECS: AWSService { logger: logger ) } - /// Creates a new capacity provider. Capacity providers are associated with an Amazon ECS cluster and are used - /// in capacity provider strategies to facilitate cluster auto scaling. Only capacity providers that use an Auto Scaling group can be created. Amazon ECS tasks on Fargate use - /// the FARGATE and FARGATE_SPOT capacity providers. These providers are - /// available to all accounts in the Amazon Web Services Regions that Fargate supports. + /// Creates a new capacity provider. 
Capacity providers are associated with an Amazon ECS + /// cluster and are used in capacity provider strategies to facilitate cluster auto + /// scaling. Only capacity providers that use an Auto Scaling group can be created. Amazon ECS tasks on + /// Fargate use the FARGATE and FARGATE_SPOT capacity providers. + /// These providers are available to all accounts in the Amazon Web Services Regions that Fargate + /// supports. /// /// Parameters: /// - autoScalingGroupProvider: The details of the Auto Scaling group for the capacity provider. - /// - name: The name of the capacity provider. Up to 255 characters are allowed. They include letters (both upper - /// - tags: The metadata that you apply to the capacity provider to categorize and organize them more + /// - name: The name of the capacity provider. Up to 255 characters are allowed. They include + /// - tags: The metadata that you apply to the capacity provider to categorize and organize them /// - logger: Logger use during operation @inlinable public func createCapacityProvider( @@ -142,14 +147,14 @@ public struct ECS: AWSService { return try await self.createCapacityProvider(input, logger: logger) } - /// Creates a new Amazon ECS cluster. By default, your account receives a default cluster when - /// you launch your first container instance. However, you can create your own cluster with a unique - /// name. When you call the CreateCluster API operation, - /// Amazon ECS attempts to create the Amazon ECS service-linked role for your account. This is so that it can - /// manage required resources in other Amazon Web Services services on your behalf. However, if the user that makes - /// the call doesn't have permissions to create the service-linked role, it isn't created. For more - /// information, see Using service-linked - /// roles for Amazon ECS in the Amazon Elastic Container Service Developer Guide. + /// Creates a new Amazon ECS cluster. 
By default, your account receives a default + /// cluster when you launch your first container instance. However, you can create your own + /// cluster with a unique name. When you call the CreateCluster + /// API operation, Amazon ECS attempts to create the Amazon ECS service-linked role for your + /// account. This is so that it can manage required resources in other Amazon Web Services services on + /// your behalf. However, if the user that makes the call doesn't have permissions to + /// create the service-linked role, it isn't created. For more information, see Using + /// service-linked roles for Amazon ECS in the Amazon Elastic Container Service Developer Guide. @Sendable @inlinable public func createCluster(_ input: CreateClusterRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateClusterResponse { @@ -162,23 +167,23 @@ public struct ECS: AWSService { logger: logger ) } - /// Creates a new Amazon ECS cluster. By default, your account receives a default cluster when - /// you launch your first container instance. However, you can create your own cluster with a unique - /// name. When you call the CreateCluster API operation, - /// Amazon ECS attempts to create the Amazon ECS service-linked role for your account. This is so that it can - /// manage required resources in other Amazon Web Services services on your behalf. However, if the user that makes - /// the call doesn't have permissions to create the service-linked role, it isn't created. For more - /// information, see Using service-linked - /// roles for Amazon ECS in the Amazon Elastic Container Service Developer Guide. + /// Creates a new Amazon ECS cluster. By default, your account receives a default + /// cluster when you launch your first container instance. However, you can create your own + /// cluster with a unique name. When you call the CreateCluster + /// API operation, Amazon ECS attempts to create the Amazon ECS service-linked role for your + /// account. 
This is so that it can manage required resources in other Amazon Web Services services on + /// your behalf. However, if the user that makes the call doesn't have permissions to + /// create the service-linked role, it isn't created. For more information, see Using + /// service-linked roles for Amazon ECS in the Amazon Elastic Container Service Developer Guide. /// /// Parameters: - /// - capacityProviders: The short name of one or more capacity providers to associate with the cluster. A capacity provider - /// - clusterName: The name of your cluster. If you don't specify a name for your cluster, you create a cluster that's + /// - capacityProviders: The short name of one or more capacity providers to associate with the cluster. A + /// - clusterName: The name of your cluster. If you don't specify a name for your cluster, you create a /// - configuration: The execute command configuration for the cluster. - /// - defaultCapacityProviderStrategy: The capacity provider strategy to set as the default for the cluster. After a default capacity + /// - defaultCapacityProviderStrategy: The capacity provider strategy to set as the default for the cluster. After a default /// - serviceConnectDefaults: Use this parameter to set a default Service Connect namespace. After you set a default - /// - settings: The setting to use when creating a cluster. This parameter is used to turn on CloudWatch Container Insights - /// - tags: The metadata that you apply to the cluster to help you categorize and organize them. Each tag + /// - settings: The setting to use when creating a cluster. This parameter is used to turn on CloudWatch + /// - tags: The metadata that you apply to the cluster to help you categorize and organize them. 
/// - logger: Logger use during operation @inlinable public func createCluster( @@ -203,63 +208,66 @@ public struct ECS: AWSService { return try await self.createCluster(input, logger: logger) } - /// Runs and maintains your desired number of tasks from a specified task definition. If the number of - /// tasks running in a service drops below the desiredCount, Amazon ECS runs another copy of the - /// task in the specified cluster. To update an existing service, use UpdateService. On March 21, 2024, a change was made to resolve the task definition revision before authorization. When a task definition revision is not specified, authorization will occur using the latest revision of a task definition. Amazon Elastic Inference (EI) is no longer available to customers. In addition to maintaining the desired count of tasks in your service, you can optionally run your - /// service behind one or more load balancers. The load balancers distribute traffic across the tasks that - /// are associated with the service. For more information, see Service load - /// balancing in the Amazon Elastic Container Service Developer Guide. You can attach Amazon EBS volumes to Amazon ECS tasks by configuring the volume when creating or updating a - /// service. volumeConfigurations is only supported for REPLICA service and not DAEMON - /// service. For more infomation, see Amazon EBS - /// volumes in the Amazon Elastic Container Service Developer Guide. Tasks for services that don't use a load balancer are considered healthy if they're in the - /// RUNNING state. Tasks for services that use a load balancer are considered healthy if - /// they're in the RUNNING state and are reported as healthy by the load balancer. There are two service scheduler strategies available: REPLICA - The replica scheduling strategy places and maintains your - /// desired number of tasks across your cluster. By default, the service scheduler spreads tasks - /// across Availability Zones. 
You can use task placement strategies and constraints to customize - /// task placement decisions. For more information, see Service - /// scheduler concepts in the Amazon Elastic Container Service Developer Guide. DAEMON - The daemon scheduling strategy deploys exactly one task on each - /// active container instance that meets all of the task placement constraints that you specify in - /// your cluster. The service scheduler also evaluates the task placement constraints for running - /// tasks. It also stops tasks that don't meet the placement constraints. When using this strategy, - /// you don't need to specify a desired number of tasks, a task placement strategy, or use Service - /// Auto Scaling policies. For more information, see Service - /// scheduler concepts in the Amazon Elastic Container Service Developer Guide. You can optionally specify a deployment configuration for your service. The deployment is initiated - /// by changing properties. For example, the deployment might be initiated by the task definition or by - /// your desired count of a service. You can use UpdateService. The default value for a replica service for - /// minimumHealthyPercent is 100%. The default value for a daemon service for - /// minimumHealthyPercent is 0%. If a service uses the ECS deployment controller, the minimum healthy percent represents - /// a lower limit on the number of tasks in a service that must remain in the RUNNING state - /// during a deployment. Specifically, it represents it as a percentage of your desired number of tasks - /// (rounded up to the nearest integer). This happens when any of your container instances are in the - /// DRAINING state if the service contains tasks using the EC2 launch type. - /// Using this parameter, you can deploy without using additional cluster capacity. 
For example, if you set - /// your service to have desired number of four tasks and a minimum healthy percent of 50%, the scheduler - /// might stop two existing tasks to free up cluster capacity before starting two new tasks. If they're in - /// the RUNNING state, tasks for services that don't use a load balancer are considered - /// healthy . If they're in the RUNNING state and reported as healthy by the load balancer, - /// tasks for services that do use a load balancer are considered healthy . The - /// default value for minimum healthy percent is 100%. If a service uses the ECS deployment controller, the maximum - /// percent parameter represents an upper limit on the number of tasks in a service that are - /// allowed in the RUNNING or PENDING state during a deployment. Specifically, it - /// represents it as a percentage of the desired number of tasks (rounded down to the nearest integer). - /// This happens when any of your container instances are in the DRAINING state if the service - /// contains tasks using the EC2 launch type. Using this parameter, you can define the - /// deployment batch size. For example, if your service has a desired number of four tasks and a maximum - /// percent value of 200%, the scheduler may start four new tasks before stopping the four older tasks - /// (provided that the cluster resources required to do this are available). The default value for maximum - /// percent is 200%. If a service uses either the CODE_DEPLOY or EXTERNAL deployment controller - /// types and tasks that use the EC2 launch type, the minimum healthy - /// percent and maximum percent values are used only to - /// define the lower and upper limit on the number of the tasks in the service that remain in the - /// RUNNING state. This is while the container instances are in the DRAINING - /// state. If the tasks in the service use the Fargate launch type, the minimum healthy - /// percent and maximum percent values aren't used. 
This is the case even if they're currently visible when - /// describing your service. When creating a service that uses the EXTERNAL deployment controller, you can specify - /// only parameters that aren't controlled at the task set level. The only required parameter is the - /// service name. You control your services using the CreateTaskSet. For more information, see Amazon ECS deployment - /// types in the Amazon Elastic Container Service Developer Guide. When the service scheduler launches new tasks, it determines task placement. For information about - /// task placement and task placement strategies, see Amazon ECS task - /// placement in the Amazon Elastic Container Service Developer Guide + /// Runs and maintains your desired number of tasks from a specified task definition. If + /// the number of tasks running in a service drops below the desiredCount, + /// Amazon ECS runs another copy of the task in the specified cluster. To update an existing + /// service, use UpdateService. On March 21, 2024, a change was made to resolve the task definition revision before authorization. When a task definition revision is not specified, authorization will occur using the latest revision of a task definition. Amazon Elastic Inference (EI) is no longer available to customers. In addition to maintaining the desired count of tasks in your service, you can + /// optionally run your service behind one or more load balancers. The load balancers + /// distribute traffic across the tasks that are associated with the service. For more + /// information, see Service load balancing in the Amazon Elastic Container Service Developer Guide. You can attach Amazon EBS volumes to Amazon ECS tasks by configuring the volume when creating or + /// updating a service. volumeConfigurations is only supported for REPLICA + /// service and not DAEMON service. For more information, see Amazon EBS volumes in the Amazon Elastic Container Service Developer Guide. 
Tasks for services that don't use a load balancer are considered healthy if they're in + /// the RUNNING state. Tasks for services that use a load balancer are + /// considered healthy if they're in the RUNNING state and are reported as + /// healthy by the load balancer. There are two service scheduler strategies available: REPLICA - The replica scheduling strategy places and + /// maintains your desired number of tasks across your cluster. By default, the + /// service scheduler spreads tasks across Availability Zones. You can use task + /// placement strategies and constraints to customize task placement decisions. For + /// more information, see Service scheduler concepts in the Amazon Elastic Container Service Developer Guide. DAEMON - The daemon scheduling strategy deploys exactly one + /// task on each active container instance that meets all of the task placement + /// constraints that you specify in your cluster. The service scheduler also + /// evaluates the task placement constraints for running tasks. It also stops tasks + /// that don't meet the placement constraints. When using this strategy, you don't + /// need to specify a desired number of tasks, a task placement strategy, or use + /// Service Auto Scaling policies. For more information, see Service scheduler concepts in the Amazon Elastic Container Service Developer Guide. You can optionally specify a deployment configuration for your service. The deployment + /// is initiated by changing properties. For example, the deployment might be initiated by + /// the task definition or by your desired count of a service. You can use UpdateService. The default value for a replica service for + /// minimumHealthyPercent is 100%. The default value for a daemon service + /// for minimumHealthyPercent is 0%. 
If a service uses the ECS deployment controller, the minimum healthy + /// percent represents a lower limit on the number of tasks in a service that must remain in + /// the RUNNING state during a deployment. Specifically, it represents it as a + /// percentage of your desired number of tasks (rounded up to the nearest integer). This + /// happens when any of your container instances are in the DRAINING state if + /// the service contains tasks using the EC2 launch type. Using this + /// parameter, you can deploy without using additional cluster capacity. For example, if you + /// set your service to have desired number of four tasks and a minimum healthy percent of + /// 50%, the scheduler might stop two existing tasks to free up cluster capacity before + /// starting two new tasks. If they're in the RUNNING state, tasks for services + /// that don't use a load balancer are considered healthy . If they're in the + /// RUNNING state and reported as healthy by the load balancer, tasks for + /// services that do use a load balancer are considered healthy . The + /// default value for minimum healthy percent is 100%. If a service uses the ECS deployment controller, the maximum percent parameter represents an upper limit on the + /// number of tasks in a service that are allowed in the RUNNING or + /// PENDING state during a deployment. Specifically, it represents it as a + /// percentage of the desired number of tasks (rounded down to the nearest integer). This + /// happens when any of your container instances are in the DRAINING state if + /// the service contains tasks using the EC2 launch type. Using this + /// parameter, you can define the deployment batch size. For example, if your service has a + /// desired number of four tasks and a maximum percent value of 200%, the scheduler may + /// start four new tasks before stopping the four older tasks (provided that the cluster + /// resources required to do this are available). 
The default value for maximum percent is + /// 200%. If a service uses either the CODE_DEPLOY or EXTERNAL + /// deployment controller types and tasks that use the EC2 launch type, the + /// minimum healthy percent and maximum percent values are used only to define the lower and upper limit + /// on the number of the tasks in the service that remain in the RUNNING state. + /// This is while the container instances are in the DRAINING state. If the + /// tasks in the service use the Fargate launch type, the minimum healthy + /// percent and maximum percent values aren't used. This is the case even if they're + /// currently visible when describing your service. When creating a service that uses the EXTERNAL deployment controller, you + /// can specify only parameters that aren't controlled at the task set level. The only + /// required parameter is the service name. You control your services using the CreateTaskSet. For more information, see Amazon ECS deployment types in the Amazon Elastic Container Service Developer Guide. When the service scheduler launches new tasks, it determines task placement. For + /// information about task placement and task placement strategies, see Amazon ECS + /// task placement in the Amazon Elastic Container Service Developer Guide @Sendable @inlinable public func createService(_ input: CreateServiceRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateServiceResponse { @@ -272,90 +280,93 @@ public struct ECS: AWSService { logger: logger ) } - /// Runs and maintains your desired number of tasks from a specified task definition. If the number of - /// tasks running in a service drops below the desiredCount, Amazon ECS runs another copy of the - /// task in the specified cluster. To update an existing service, use UpdateService. On March 21, 2024, a change was made to resolve the task definition revision before authorization. 
When a task definition revision is not specified, authorization will occur using the latest revision of a task definition. Amazon Elastic Inference (EI) is no longer available to customers. In addition to maintaining the desired count of tasks in your service, you can optionally run your - /// service behind one or more load balancers. The load balancers distribute traffic across the tasks that - /// are associated with the service. For more information, see Service load - /// balancing in the Amazon Elastic Container Service Developer Guide. You can attach Amazon EBS volumes to Amazon ECS tasks by configuring the volume when creating or updating a - /// service. volumeConfigurations is only supported for REPLICA service and not DAEMON - /// service. For more infomation, see Amazon EBS - /// volumes in the Amazon Elastic Container Service Developer Guide. Tasks for services that don't use a load balancer are considered healthy if they're in the - /// RUNNING state. Tasks for services that use a load balancer are considered healthy if - /// they're in the RUNNING state and are reported as healthy by the load balancer. There are two service scheduler strategies available: REPLICA - The replica scheduling strategy places and maintains your - /// desired number of tasks across your cluster. By default, the service scheduler spreads tasks - /// across Availability Zones. You can use task placement strategies and constraints to customize - /// task placement decisions. For more information, see Service - /// scheduler concepts in the Amazon Elastic Container Service Developer Guide. DAEMON - The daemon scheduling strategy deploys exactly one task on each - /// active container instance that meets all of the task placement constraints that you specify in - /// your cluster. The service scheduler also evaluates the task placement constraints for running - /// tasks. It also stops tasks that don't meet the placement constraints. 
When using this strategy, - /// you don't need to specify a desired number of tasks, a task placement strategy, or use Service - /// Auto Scaling policies. For more information, see Service - /// scheduler concepts in the Amazon Elastic Container Service Developer Guide. You can optionally specify a deployment configuration for your service. The deployment is initiated - /// by changing properties. For example, the deployment might be initiated by the task definition or by - /// your desired count of a service. You can use UpdateService. The default value for a replica service for - /// minimumHealthyPercent is 100%. The default value for a daemon service for - /// minimumHealthyPercent is 0%. If a service uses the ECS deployment controller, the minimum healthy percent represents - /// a lower limit on the number of tasks in a service that must remain in the RUNNING state - /// during a deployment. Specifically, it represents it as a percentage of your desired number of tasks - /// (rounded up to the nearest integer). This happens when any of your container instances are in the - /// DRAINING state if the service contains tasks using the EC2 launch type. - /// Using this parameter, you can deploy without using additional cluster capacity. For example, if you set - /// your service to have desired number of four tasks and a minimum healthy percent of 50%, the scheduler - /// might stop two existing tasks to free up cluster capacity before starting two new tasks. If they're in - /// the RUNNING state, tasks for services that don't use a load balancer are considered - /// healthy . If they're in the RUNNING state and reported as healthy by the load balancer, - /// tasks for services that do use a load balancer are considered healthy . The - /// default value for minimum healthy percent is 100%. 
If a service uses the ECS deployment controller, the maximum - /// percent parameter represents an upper limit on the number of tasks in a service that are - /// allowed in the RUNNING or PENDING state during a deployment. Specifically, it - /// represents it as a percentage of the desired number of tasks (rounded down to the nearest integer). - /// This happens when any of your container instances are in the DRAINING state if the service - /// contains tasks using the EC2 launch type. Using this parameter, you can define the - /// deployment batch size. For example, if your service has a desired number of four tasks and a maximum - /// percent value of 200%, the scheduler may start four new tasks before stopping the four older tasks - /// (provided that the cluster resources required to do this are available). The default value for maximum - /// percent is 200%. If a service uses either the CODE_DEPLOY or EXTERNAL deployment controller - /// types and tasks that use the EC2 launch type, the minimum healthy - /// percent and maximum percent values are used only to - /// define the lower and upper limit on the number of the tasks in the service that remain in the - /// RUNNING state. This is while the container instances are in the DRAINING - /// state. If the tasks in the service use the Fargate launch type, the minimum healthy - /// percent and maximum percent values aren't used. This is the case even if they're currently visible when - /// describing your service. When creating a service that uses the EXTERNAL deployment controller, you can specify - /// only parameters that aren't controlled at the task set level. The only required parameter is the - /// service name. You control your services using the CreateTaskSet. For more information, see Amazon ECS deployment - /// types in the Amazon Elastic Container Service Developer Guide. When the service scheduler launches new tasks, it determines task placement. 
For information about - /// task placement and task placement strategies, see Amazon ECS task - /// placement in the Amazon Elastic Container Service Developer Guide + /// Runs and maintains your desired number of tasks from a specified task definition. If + /// the number of tasks running in a service drops below the desiredCount, + /// Amazon ECS runs another copy of the task in the specified cluster. To update an existing + /// service, use UpdateService. On March 21, 2024, a change was made to resolve the task definition revision before authorization. When a task definition revision is not specified, authorization will occur using the latest revision of a task definition. Amazon Elastic Inference (EI) is no longer available to customers. In addition to maintaining the desired count of tasks in your service, you can + /// optionally run your service behind one or more load balancers. The load balancers + /// distribute traffic across the tasks that are associated with the service. For more + /// information, see Service load balancing in the Amazon Elastic Container Service Developer Guide. You can attach Amazon EBS volumes to Amazon ECS tasks by configuring the volume when creating or + /// updating a service. volumeConfigurations is only supported for REPLICA + /// service and not DAEMON service. For more information, see Amazon EBS volumes in the Amazon Elastic Container Service Developer Guide. Tasks for services that don't use a load balancer are considered healthy if they're in + /// the RUNNING state. Tasks for services that use a load balancer are + /// considered healthy if they're in the RUNNING state and are reported as + /// healthy by the load balancer. There are two service scheduler strategies available: REPLICA - The replica scheduling strategy places and + /// maintains your desired number of tasks across your cluster. By default, the + /// service scheduler spreads tasks across Availability Zones. 
You can use task + /// placement strategies and constraints to customize task placement decisions. For + /// more information, see Service scheduler concepts in the Amazon Elastic Container Service Developer Guide. DAEMON - The daemon scheduling strategy deploys exactly one + /// task on each active container instance that meets all of the task placement + /// constraints that you specify in your cluster. The service scheduler also + /// evaluates the task placement constraints for running tasks. It also stops tasks + /// that don't meet the placement constraints. When using this strategy, you don't + /// need to specify a desired number of tasks, a task placement strategy, or use + /// Service Auto Scaling policies. For more information, see Service scheduler concepts in the Amazon Elastic Container Service Developer Guide. You can optionally specify a deployment configuration for your service. The deployment + /// is initiated by changing properties. For example, the deployment might be initiated by + /// the task definition or by your desired count of a service. You can use UpdateService. The default value for a replica service for + /// minimumHealthyPercent is 100%. The default value for a daemon service + /// for minimumHealthyPercent is 0%. If a service uses the ECS deployment controller, the minimum healthy + /// percent represents a lower limit on the number of tasks in a service that must remain in + /// the RUNNING state during a deployment. Specifically, it represents it as a + /// percentage of your desired number of tasks (rounded up to the nearest integer). This + /// happens when any of your container instances are in the DRAINING state if + /// the service contains tasks using the EC2 launch type. Using this + /// parameter, you can deploy without using additional cluster capacity. 
For example, if you + /// set your service to have a desired number of four tasks and a minimum healthy percent of + /// 50%, the scheduler might stop two existing tasks to free up cluster capacity before + /// starting two new tasks. If they're in the RUNNING state, tasks for services + /// that don't use a load balancer are considered healthy. If they're in the + /// RUNNING state and reported as healthy by the load balancer, tasks for + /// services that do use a load balancer are considered healthy. The + /// default value for minimum healthy percent is 100%. If a service uses the ECS deployment controller, the maximum percent parameter represents an upper limit on the + /// number of tasks in a service that are allowed in the RUNNING or + /// PENDING state during a deployment. Specifically, it represents it as a + /// percentage of the desired number of tasks (rounded down to the nearest integer). This + /// happens when any of your container instances are in the DRAINING state if + /// the service contains tasks using the EC2 launch type. Using this + /// parameter, you can define the deployment batch size. For example, if your service has a + /// desired number of four tasks and a maximum percent value of 200%, the scheduler may + /// start four new tasks before stopping the four older tasks (provided that the cluster + /// resources required to do this are available). The default value for maximum percent is + /// 200%. If a service uses either the CODE_DEPLOY or EXTERNAL + /// deployment controller types and tasks that use the EC2 launch type, the + /// minimum healthy percent and maximum percent values are used only to define the lower and upper limit + /// on the number of the tasks in the service that remain in the RUNNING state. + /// This is while the container instances are in the DRAINING state. If the + /// tasks in the service use the Fargate launch type, the minimum healthy + /// percent and maximum percent values aren't used. 
This is the case even if they're + /// currently visible when describing your service. When creating a service that uses the EXTERNAL deployment controller, you + /// can specify only parameters that aren't controlled at the task set level. The only + /// required parameter is the service name. You control your services using the CreateTaskSet. For more information, see Amazon ECS deployment types in the Amazon Elastic Container Service Developer Guide. When the service scheduler launches new tasks, it determines task placement. For + /// information about task placement and task placement strategies, see Amazon ECS + /// task placement in the Amazon Elastic Container Service Developer Guide /// /// Parameters: /// - availabilityZoneRebalancing: Indicates whether to use Availability Zone rebalancing for the service. For more information, see Balancing an Amazon ECS service across Availability Zones in - /// - capacityProviderStrategy: The capacity provider strategy to use for the service. If a capacityProviderStrategy is specified, the launchType parameter must + /// - capacityProviderStrategy: The capacity provider strategy to use for the service. If a capacityProviderStrategy is specified, the launchType /// - clientToken: An identifier that you provide to ensure the idempotency of the request. It must be /// - cluster: The short name or full Amazon Resource Name (ARN) of the cluster that you run your service on. - /// - deploymentConfiguration: Optional deployment parameters that control how many tasks run during the deployment and the ordering - /// - deploymentController: The deployment controller to use for the service. If no deployment controller is specified, the - /// - desiredCount: The number of instantiations of the specified task definition to place and keep running in your - /// - enableECSManagedTags: Specifies whether to turn on Amazon ECS managed tags for the tasks within the service. 
For more + /// - deploymentConfiguration: Optional deployment parameters that control how many tasks run during the deployment + /// - deploymentController: The deployment controller to use for the service. If no deployment controller is + /// - desiredCount: The number of instantiations of the specified task definition to place and keep + /// - enableECSManagedTags: Specifies whether to turn on Amazon ECS managed tags for the tasks within the service. For /// - enableExecuteCommand: Determines whether the execute command functionality is turned on for the service. If - /// - healthCheckGracePeriodSeconds: The period of time, in seconds, that the Amazon ECS service scheduler ignores unhealthy Elastic Load Balancing, VPC Lattice, and container - /// - launchType: The infrastructure that you run your service on. For more information, see Amazon ECS launch - /// - loadBalancers: A load balancer object representing the load balancers to use with your service. For more - /// - networkConfiguration: The network configuration for the service. This parameter is required for task definitions that use - /// - placementConstraints: An array of placement constraint objects to use for tasks in your service. You can specify a maximum - /// - placementStrategy: The placement strategy objects to use for tasks in your service. You can specify a maximum of 5 - /// - platformVersion: The platform version that your tasks in the service are running on. A platform version is specified - /// - propagateTags: Specifies whether to propagate the tags from the task definition to the task. If no value is - /// - role: The name or full Amazon Resource Name (ARN) of the IAM role that allows Amazon ECS to make calls to your load balancer on - /// - schedulingStrategy: The scheduling strategy to use for the service. For more information, see Services. 
There are two service scheduler strategies available: REPLICA-The replica scheduling strategy places and maintains the desired + /// - healthCheckGracePeriodSeconds: The period of time, in seconds, that the Amazon ECS service scheduler ignores unhealthy + /// - launchType: The infrastructure that you run your service on. For more information, see Amazon ECS + /// - loadBalancers: A load balancer object representing the load balancers to use with your service. For + /// - networkConfiguration: The network configuration for the service. This parameter is required for task + /// - placementConstraints: An array of placement constraint objects to use for tasks in your service. You can + /// - placementStrategy: The placement strategy objects to use for tasks in your service. You can specify a + /// - platformVersion: The platform version that your tasks in the service are running on. A platform version + /// - propagateTags: Specifies whether to propagate the tags from the task definition to the task. If no + /// - role: The name or full Amazon Resource Name (ARN) of the IAM role that allows Amazon ECS to make calls to your + /// - schedulingStrategy: The scheduling strategy to use for the service. For more information, see Services. There are two service scheduler strategies available: REPLICA-The replica scheduling strategy places and /// - serviceConnectConfiguration: The configuration for this service to discover and connect to - /// - serviceName: The name of your service. Up to 255 letters (uppercase and lowercase), numbers, underscores, and hyphens are allowed. Service names must be unique within a cluster, but - /// - serviceRegistries: The details of the service discovery registry to associate with this service. For more information, - /// - tags: The metadata that you apply to the service to help you categorize and organize them. 
Each tag - /// - taskDefinition: The family and revision (family:revision) or full ARN of the - /// - volumeConfigurations: The configuration for a volume specified in the task definition as a volume that is configured at + /// - serviceName: The name of your service. Up to 255 letters (uppercase and lowercase), numbers, underscores, and hyphens are allowed. Service names must be unique within + /// - serviceRegistries: The details of the service discovery registry to associate with this service. For more + /// - tags: The metadata that you apply to the service to help you categorize and organize them. + /// - taskDefinition: The family and revision (family:revision) or + /// - volumeConfigurations: The configuration for a volume specified in the task definition as a volume that is /// - vpcLatticeConfigurations: The VPC Lattice configuration for the service being created. /// - logger: Logger use during operation @inlinable @@ -419,10 +430,11 @@ public struct ECS: AWSService { return try await self.createService(input, logger: logger) } - /// Create a task set in the specified cluster and service. This is used when a service uses the - /// EXTERNAL deployment controller type. For more information, see Amazon ECS deployment - /// types in the Amazon Elastic Container Service Developer Guide. On March 21, 2024, a change was made to resolve the task definition revision before authorization. When a task definition revision is not specified, authorization will occur using the latest revision of a task definition. For information about the maximum number of task sets and other quotas, see Amazon ECS service - /// quotas in the Amazon Elastic Container Service Developer Guide. + /// Create a task set in the specified cluster and service. This is used when a service + /// uses the EXTERNAL deployment controller type. For more information, see + /// Amazon ECS deployment + /// types in the Amazon Elastic Container Service Developer Guide. 
On March 21, 2024, a change was made to resolve the task definition revision before authorization. When a task definition revision is not specified, authorization will occur using the latest revision of a task definition. For information about the maximum number of task sets and other quotas, see Amazon ECS + /// service quotas in the Amazon Elastic Container Service Developer Guide. @Sendable @inlinable public func createTaskSet(_ input: CreateTaskSetRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateTaskSetResponse { @@ -435,25 +447,26 @@ public struct ECS: AWSService { logger: logger ) } - /// Create a task set in the specified cluster and service. This is used when a service uses the - /// EXTERNAL deployment controller type. For more information, see Amazon ECS deployment - /// types in the Amazon Elastic Container Service Developer Guide. On March 21, 2024, a change was made to resolve the task definition revision before authorization. When a task definition revision is not specified, authorization will occur using the latest revision of a task definition. For information about the maximum number of task sets and other quotas, see Amazon ECS service - /// quotas in the Amazon Elastic Container Service Developer Guide. + /// Create a task set in the specified cluster and service. This is used when a service + /// uses the EXTERNAL deployment controller type. For more information, see + /// Amazon ECS deployment + /// types in the Amazon Elastic Container Service Developer Guide. On March 21, 2024, a change was made to resolve the task definition revision before authorization. When a task definition revision is not specified, authorization will occur using the latest revision of a task definition. For information about the maximum number of task sets and other quotas, see Amazon ECS + /// service quotas in the Amazon Elastic Container Service Developer Guide. 
/// /// Parameters: /// - capacityProviderStrategy: The capacity provider strategy to use for the task set. A capacity provider strategy consists of one or more capacity providers along with the /// - clientToken: An identifier that you provide to ensure the idempotency of the request. It must be - /// - cluster: The short name or full Amazon Resource Name (ARN) of the cluster that hosts the service to create the task set - /// - externalId: An optional non-unique tag that identifies this task set in external systems. If the task set is - /// - launchType: The launch type that new tasks in the task set uses. For more information, see Amazon ECS launch - /// - loadBalancers: A load balancer object representing the load balancer to use with the task set. The supported load + /// - cluster: The short name or full Amazon Resource Name (ARN) of the cluster that hosts the service to create the + /// - externalId: An optional non-unique tag that identifies this task set in external systems. If the + /// - launchType: The launch type that new tasks in the task set uses. For more information, see Amazon ECS + /// - loadBalancers: A load balancer object representing the load balancer to use with the task set. The /// - networkConfiguration: An object representing the network configuration for a task set. - /// - platformVersion: The platform version that the tasks in the task set uses. A platform version is specified only for - /// - scale: A floating-point percentage of the desired number of tasks to place and keep running in the task + /// - platformVersion: The platform version that the tasks in the task set uses. A platform version is + /// - scale: A floating-point percentage of the desired number of tasks to place and keep running /// - service: The short name or full Amazon Resource Name (ARN) of the service to create the task set in. - /// - serviceRegistries: The details of the service discovery registries to assign to this task set. 
For more information, see - /// - tags: The metadata that you apply to the task set to help you categorize and organize them. Each tag - /// - taskDefinition: The task definition for the tasks in the task set to use. If a revision isn't specified, the latest + /// - serviceRegistries: The details of the service discovery registries to assign to this task set. For more + /// - tags: The metadata that you apply to the task set to help you categorize and organize them. + /// - taskDefinition: The task definition for the tasks in the task set to use. If a revision isn't /// - logger: Logger use during operation @inlinable public func createTaskSet( @@ -490,7 +503,8 @@ public struct ECS: AWSService { return try await self.createTaskSet(input, logger: logger) } - /// Disables an account setting for a specified user, role, or the root user for an account. + /// Disables an account setting for a specified user, role, or the root user for an + /// account. @Sendable @inlinable public func deleteAccountSetting(_ input: DeleteAccountSettingRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DeleteAccountSettingResponse { @@ -503,11 +517,12 @@ public struct ECS: AWSService { logger: logger ) } - /// Disables an account setting for a specified user, role, or the root user for an account. + /// Disables an account setting for a specified user, role, or the root user for an + /// account. /// /// Parameters: - /// - name: The resource name to disable the account setting for. If serviceLongArnFormat is - /// - principalArn: The Amazon Resource Name (ARN) of the principal. It can be an user, role, or the root user. If you + /// - name: The resource name to disable the account setting for. If + /// - principalArn: The Amazon Resource Name (ARN) of the principal. 
It can be a user, role, or the /// - logger: Logger use during operation @inlinable public func deleteAccountSetting( @@ -538,8 +553,8 @@ public struct ECS: AWSService { /// Deletes one or more custom attributes from an Amazon ECS resource. /// /// Parameters: - /// - attributes: The attributes to delete from your resource. You can specify up to 10 attributes for each request. - /// - cluster: The short name or full Amazon Resource Name (ARN) of the cluster that contains the resource to delete attributes. + /// - attributes: The attributes to delete from your resource. You can specify up to 10 attributes for + /// - cluster: The short name or full Amazon Resource Name (ARN) of the cluster that contains the resource to delete /// - logger: Logger use during operation @inlinable public func deleteAttributes( @@ -554,14 +569,16 @@ public struct ECS: AWSService { return try await self.deleteAttributes(input, logger: logger) } - /// Deletes the specified capacity provider. 
The FARGATE and FARGATE_SPOT capacity providers are + /// reserved and can't be deleted. You can disassociate them from a cluster using either + /// PutClusterCapacityProviders or by deleting the cluster. Prior to a capacity provider being deleted, the capacity provider must be removed from + /// the capacity provider strategy from all services. The UpdateService API + /// can be used to remove a capacity provider from a service's capacity provider strategy. + /// When updating a service, the forceNewDeployment option can be used to + /// ensure that any tasks using the Amazon EC2 instance capacity provided by the capacity + /// provider are transitioned to use the capacity from the remaining capacity providers. + /// Only capacity providers that aren't associated with a cluster can be deleted. To remove + /// a capacity provider from a cluster, you can either use PutClusterCapacityProviders or delete the cluster. @Sendable @inlinable public func deleteCapacityProvider(_ input: DeleteCapacityProviderRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DeleteCapacityProviderResponse { @@ -574,14 +591,16 @@ public struct ECS: AWSService { logger: logger ) } - /// Deletes the specified capacity provider. The FARGATE and FARGATE_SPOT capacity providers are reserved and can't - /// be deleted. You can disassociate them from a cluster using either PutClusterCapacityProviders or by deleting the cluster. Prior to a capacity provider being deleted, the capacity provider must be removed from the capacity - /// provider strategy from all services. The UpdateService API can be used to - /// remove a capacity provider from a service's capacity provider strategy. When updating a service, the - /// forceNewDeployment option can be used to ensure that any tasks using the Amazon EC2 - /// instance capacity provided by the capacity provider are transitioned to use the capacity from the - /// remaining capacity providers. 
Only capacity providers that aren't associated with a cluster can be - /// deleted. To remove a capacity provider from a cluster, you can either use PutClusterCapacityProviders or delete the cluster. + /// Deletes the specified capacity provider. The FARGATE and FARGATE_SPOT capacity providers are + /// reserved and can't be deleted. You can disassociate them from a cluster using either + /// PutClusterCapacityProviders or by deleting the cluster. Prior to a capacity provider being deleted, the capacity provider must be removed from + /// the capacity provider strategy from all services. The UpdateService API + /// can be used to remove a capacity provider from a service's capacity provider strategy. + /// When updating a service, the forceNewDeployment option can be used to + /// ensure that any tasks using the Amazon EC2 instance capacity provided by the capacity + /// provider are transitioned to use the capacity from the remaining capacity providers. + /// Only capacity providers that aren't associated with a cluster can be deleted. To remove + /// a capacity provider from a cluster, you can either use PutClusterCapacityProviders or delete the cluster. /// /// Parameters: /// - capacityProvider: The short name or full Amazon Resource Name (ARN) of the capacity provider to delete. @@ -597,12 +616,11 @@ public struct ECS: AWSService { return try await self.deleteCapacityProvider(input, logger: logger) } - /// Deletes the specified cluster. The cluster transitions to the INACTIVE state. Clusters - /// with an INACTIVE status might remain discoverable in your account for a period of time. - /// However, this behavior is subject to change in the future. We don't recommend that you rely on - /// INACTIVE clusters persisting. You must deregister all container instances from this cluster before you may delete it. You can list - /// the container instances in a cluster with ListContainerInstances - /// and deregister them with DeregisterContainerInstance. 
+ /// Deletes the specified cluster. The cluster transitions to the INACTIVE + /// state. Clusters with an INACTIVE status might remain discoverable in your + /// account for a period of time. However, this behavior is subject to change in the future. + /// We don't recommend that you rely on INACTIVE clusters persisting. You must deregister all container instances from this cluster before you may delete + /// it. You can list the container instances in a cluster with ListContainerInstances and deregister them with DeregisterContainerInstance. @Sendable @inlinable public func deleteCluster(_ input: DeleteClusterRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DeleteClusterResponse { @@ -615,12 +633,11 @@ public struct ECS: AWSService { logger: logger ) } - /// Deletes the specified cluster. The cluster transitions to the INACTIVE state. Clusters - /// with an INACTIVE status might remain discoverable in your account for a period of time. - /// However, this behavior is subject to change in the future. We don't recommend that you rely on - /// INACTIVE clusters persisting. You must deregister all container instances from this cluster before you may delete it. You can list - /// the container instances in a cluster with ListContainerInstances - /// and deregister them with DeregisterContainerInstance. + /// Deletes the specified cluster. The cluster transitions to the INACTIVE + /// state. Clusters with an INACTIVE status might remain discoverable in your + /// account for a period of time. However, this behavior is subject to change in the future. + /// We don't recommend that you rely on INACTIVE clusters persisting. You must deregister all container instances from this cluster before you may delete + /// it. You can list the container instances in a cluster with ListContainerInstances and deregister them with DeregisterContainerInstance. 
/// /// Parameters: /// - cluster: The short name or full Amazon Resource Name (ARN) of the cluster to delete. @@ -636,18 +653,21 @@ public struct ECS: AWSService { return try await self.deleteCluster(input, logger: logger) } - /// Deletes a specified service within a cluster. You can delete a service if you have no running tasks - /// in it and the desired task count is zero. If the service is actively maintaining tasks, you can't - /// delete it, and you must update the service to a desired task count of zero. For more information, see - /// UpdateService. When you delete a service, if there are still running tasks that require cleanup, the service - /// status moves from ACTIVE to DRAINING, and the service is no longer - /// visible in the console or in the ListServices API operation. - /// After all tasks have transitioned to either STOPPING or STOPPED status, - /// the service status moves from DRAINING to INACTIVE. Services in the - /// DRAINING or INACTIVE status can still be viewed with the DescribeServices API operation. However, in the future, INACTIVE services - /// may be cleaned up and purged from Amazon ECS record keeping, and DescribeServices calls on - /// those services return a ServiceNotFoundException error. If you attempt to create a new service with the same name as an existing service in either - /// ACTIVE or DRAINING status, you receive an error. + /// Deletes a specified service within a cluster. You can delete a service if you have no + /// running tasks in it and the desired task count is zero. If the service is actively + /// maintaining tasks, you can't delete it, and you must update the service to a desired + /// task count of zero. For more information, see UpdateService. When you delete a service, if there are still running tasks that require cleanup, + /// the service status moves from ACTIVE to DRAINING, and the + /// service is no longer visible in the console or in the ListServices + /// API operation. 
After all tasks have transitioned to either STOPPING or + /// STOPPED status, the service status moves from DRAINING + /// to INACTIVE. Services in the DRAINING or + /// INACTIVE status can still be viewed with the DescribeServices API operation. However, in the future, + /// INACTIVE services may be cleaned up and purged from Amazon ECS record + /// keeping, and DescribeServices calls on those services return a + /// ServiceNotFoundException error. If you attempt to create a new service with the same name as an existing service + /// in either ACTIVE or DRAINING status, you receive an + /// error. @Sendable @inlinable public func deleteService(_ input: DeleteServiceRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DeleteServiceResponse { @@ -660,22 +680,25 @@ public struct ECS: AWSService { logger: logger ) } - /// Deletes a specified service within a cluster. You can delete a service if you have no running tasks - /// in it and the desired task count is zero. If the service is actively maintaining tasks, you can't - /// delete it, and you must update the service to a desired task count of zero. For more information, see - /// UpdateService. When you delete a service, if there are still running tasks that require cleanup, the service - /// status moves from ACTIVE to DRAINING, and the service is no longer - /// visible in the console or in the ListServices API operation. - /// After all tasks have transitioned to either STOPPING or STOPPED status, - /// the service status moves from DRAINING to INACTIVE. Services in the - /// DRAINING or INACTIVE status can still be viewed with the DescribeServices API operation. However, in the future, INACTIVE services - /// may be cleaned up and purged from Amazon ECS record keeping, and DescribeServices calls on - /// those services return a ServiceNotFoundException error. 
If you attempt to create a new service with the same name as an existing service in either - /// ACTIVE or DRAINING status, you receive an error. + /// Deletes a specified service within a cluster. You can delete a service if you have no + /// running tasks in it and the desired task count is zero. If the service is actively + /// maintaining tasks, you can't delete it, and you must update the service to a desired + /// task count of zero. For more information, see UpdateService. When you delete a service, if there are still running tasks that require cleanup, + /// the service status moves from ACTIVE to DRAINING, and the + /// service is no longer visible in the console or in the ListServices + /// API operation. After all tasks have transitioned to either STOPPING or + /// STOPPED status, the service status moves from DRAINING + /// to INACTIVE. Services in the DRAINING or + /// INACTIVE status can still be viewed with the DescribeServices API operation. However, in the future, + /// INACTIVE services may be cleaned up and purged from Amazon ECS record + /// keeping, and DescribeServices calls on those services return a + /// ServiceNotFoundException error. If you attempt to create a new service with the same name as an existing service + /// in either ACTIVE or DRAINING status, you receive an + /// error. /// /// Parameters: /// - cluster: The short name or full Amazon Resource Name (ARN) of the cluster that hosts the service to delete. - /// - force: If true, allows you to delete a service even if it wasn't scaled down to zero tasks. + /// - force: If true, allows you to delete a service even if it wasn't scaled down to /// - service: The name of the service to delete. /// - logger: Logger use during operation @inlinable @@ -693,18 +716,21 @@ public struct ECS: AWSService { return try await self.deleteService(input, logger: logger) } - /// Deletes one or more task definitions. You must deregister a task definition revision before you delete it. 
For more information, see DeregisterTaskDefinition. When you delete a task definition revision, it is immediately transitions from the - /// INACTIVE to DELETE_IN_PROGRESS. Existing tasks and services that - /// reference a DELETE_IN_PROGRESS task definition revision continue to run without - /// disruption. Existing services that reference a DELETE_IN_PROGRESS task definition revision - /// can still scale up or down by modifying the service's desired count. You can't use a DELETE_IN_PROGRESS task definition revision to run new tasks or create - /// new services. You also can't update an existing service to reference a DELETE_IN_PROGRESS - /// task definition revision. A task definition revision will stay in DELETE_IN_PROGRESS status until all the - /// associated tasks and services have been terminated. When you delete all INACTIVE task definition revisions, the task definition name is not - /// displayed in the console and not returned in the API. If a task definition revisions are in the - /// DELETE_IN_PROGRESS state, the task definition name is displayed in the console and - /// returned in the API. The task definition name is retained by Amazon ECS and the revision is incremented the - /// next time you create a task definition with that name. + /// Deletes one or more task definitions. You must deregister a task definition revision before you delete it. For more + /// information, see DeregisterTaskDefinition. When you delete a task definition revision, it immediately transitions from the + /// INACTIVE to DELETE_IN_PROGRESS. Existing tasks and + /// services that reference a DELETE_IN_PROGRESS task definition revision + /// continue to run without disruption. Existing services that reference a + /// DELETE_IN_PROGRESS task definition revision can still scale up or down + /// by modifying the service's desired count. You can't use a DELETE_IN_PROGRESS task definition revision to run new + /// tasks or create new services. 
You also can't update an existing service to reference a + /// DELETE_IN_PROGRESS task definition revision. A task definition revision will stay in DELETE_IN_PROGRESS status until + /// all the associated tasks and services have been terminated. When you delete all INACTIVE task definition revisions, the task + /// definition name is not displayed in the console and not returned in the API. If a task + /// definition revisions are in the DELETE_IN_PROGRESS state, the task + /// definition name is displayed in the console and returned in the API. The task definition + /// name is retained by Amazon ECS and the revision is incremented the next time you create a + /// task definition with that name. @Sendable @inlinable public func deleteTaskDefinitions(_ input: DeleteTaskDefinitionsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DeleteTaskDefinitionsResponse { @@ -717,21 +743,24 @@ public struct ECS: AWSService { logger: logger ) } - /// Deletes one or more task definitions. You must deregister a task definition revision before you delete it. For more information, see DeregisterTaskDefinition. When you delete a task definition revision, it is immediately transitions from the - /// INACTIVE to DELETE_IN_PROGRESS. Existing tasks and services that - /// reference a DELETE_IN_PROGRESS task definition revision continue to run without - /// disruption. Existing services that reference a DELETE_IN_PROGRESS task definition revision - /// can still scale up or down by modifying the service's desired count. You can't use a DELETE_IN_PROGRESS task definition revision to run new tasks or create - /// new services. You also can't update an existing service to reference a DELETE_IN_PROGRESS - /// task definition revision. A task definition revision will stay in DELETE_IN_PROGRESS status until all the - /// associated tasks and services have been terminated. 
When you delete all INACTIVE task definition revisions, the task definition name is not - /// displayed in the console and not returned in the API. If a task definition revisions are in the - /// DELETE_IN_PROGRESS state, the task definition name is displayed in the console and - /// returned in the API. The task definition name is retained by Amazon ECS and the revision is incremented the - /// next time you create a task definition with that name. + /// Deletes one or more task definitions. You must deregister a task definition revision before you delete it. For more + /// information, see DeregisterTaskDefinition. When you delete a task definition revision, it is immediately transitions from the + /// INACTIVE to DELETE_IN_PROGRESS. Existing tasks and + /// services that reference a DELETE_IN_PROGRESS task definition revision + /// continue to run without disruption. Existing services that reference a + /// DELETE_IN_PROGRESS task definition revision can still scale up or down + /// by modifying the service's desired count. You can't use a DELETE_IN_PROGRESS task definition revision to run new + /// tasks or create new services. You also can't update an existing service to reference a + /// DELETE_IN_PROGRESS task definition revision. A task definition revision will stay in DELETE_IN_PROGRESS status until + /// all the associated tasks and services have been terminated. When you delete all INACTIVE task definition revisions, the task + /// definition name is not displayed in the console and not returned in the API. If a task + /// definition revisions are in the DELETE_IN_PROGRESS state, the task + /// definition name is displayed in the console and returned in the API. The task definition + /// name is retained by Amazon ECS and the revision is incremented the next time you create a + /// task definition with that name. 
/// /// Parameters: - /// - taskDefinitions: The family and revision (family:revision) or full Amazon Resource Name (ARN) of + /// - taskDefinitions: The family and revision (family:revision) or /// - logger: Logger use during operation @inlinable public func deleteTaskDefinitions( @@ -745,8 +774,7 @@ public struct ECS: AWSService { } /// Deletes a specified task set within a service. This is used when a service uses the - /// EXTERNAL deployment controller type. For more information, see Amazon ECS deployment - /// types in the Amazon Elastic Container Service Developer Guide. + /// EXTERNAL deployment controller type. For more information, see Amazon ECS deployment types in the Amazon Elastic Container Service Developer Guide. @Sendable @inlinable public func deleteTaskSet(_ input: DeleteTaskSetRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DeleteTaskSetResponse { @@ -760,13 +788,12 @@ public struct ECS: AWSService { ) } /// Deletes a specified task set within a service. This is used when a service uses the - /// EXTERNAL deployment controller type. For more information, see Amazon ECS deployment - /// types in the Amazon Elastic Container Service Developer Guide. + /// EXTERNAL deployment controller type. For more information, see Amazon ECS deployment types in the Amazon Elastic Container Service Developer Guide. /// /// Parameters: - /// - cluster: The short name or full Amazon Resource Name (ARN) of the cluster that hosts the service that the task set found in to - /// - force: If true, you can delete a task set even if it hasn't been scaled down to zero. - /// - service: The short name or full Amazon Resource Name (ARN) of the service that hosts the task set to delete. 
+ /// - cluster: The short name or full Amazon Resource Name (ARN) of the cluster that hosts the service that the task + /// - force: If true, you can delete a task set even if it hasn't been scaled down to + /// - service: The short name or full Amazon Resource Name (ARN) of the service that hosts the task set to /// - taskSet: The task set ID or full Amazon Resource Name (ARN) of the task set to delete. /// - logger: Logger use during operation @inlinable @@ -786,14 +813,15 @@ public struct ECS: AWSService { return try await self.deleteTaskSet(input, logger: logger) } - /// Deregisters an Amazon ECS container instance from the specified cluster. This instance is no longer - /// available to run tasks. If you intend to use the container instance for some other purpose after deregistration, we recommend - /// that you stop all of the tasks running on the container instance before deregistration. That prevents - /// any orphaned tasks from consuming resources. Deregistering a container instance removes the instance from a cluster, but it doesn't terminate the - /// EC2 instance. If you are finished using the instance, be sure to terminate it in the Amazon EC2 console to - /// stop billing. If you terminate a running container instance, Amazon ECS automatically deregisters the instance from - /// your cluster (stopped container instances or instances with disconnected agents aren't - /// automatically deregistered when terminated). + /// Deregisters an Amazon ECS container instance from the specified cluster. This instance is + /// no longer available to run tasks. If you intend to use the container instance for some other purpose after + /// deregistration, we recommend that you stop all of the tasks running on the container + /// instance before deregistration. That prevents any orphaned tasks from consuming + /// resources. Deregistering a container instance removes the instance from a cluster, but it doesn't + /// terminate the EC2 instance. 
If you are finished using the instance, be sure to terminate + /// it in the Amazon EC2 console to stop billing. If you terminate a running container instance, Amazon ECS automatically deregisters the + /// instance from your cluster (stopped container instances or instances with + /// disconnected agents aren't automatically deregistered when terminated). @Sendable @inlinable public func deregisterContainerInstance(_ input: DeregisterContainerInstanceRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DeregisterContainerInstanceResponse { @@ -806,19 +834,20 @@ public struct ECS: AWSService { logger: logger ) } - /// Deregisters an Amazon ECS container instance from the specified cluster. This instance is no longer - /// available to run tasks. If you intend to use the container instance for some other purpose after deregistration, we recommend - /// that you stop all of the tasks running on the container instance before deregistration. That prevents - /// any orphaned tasks from consuming resources. Deregistering a container instance removes the instance from a cluster, but it doesn't terminate the - /// EC2 instance. If you are finished using the instance, be sure to terminate it in the Amazon EC2 console to - /// stop billing. If you terminate a running container instance, Amazon ECS automatically deregisters the instance from - /// your cluster (stopped container instances or instances with disconnected agents aren't - /// automatically deregistered when terminated). + /// Deregisters an Amazon ECS container instance from the specified cluster. This instance is + /// no longer available to run tasks. If you intend to use the container instance for some other purpose after + /// deregistration, we recommend that you stop all of the tasks running on the container + /// instance before deregistration. That prevents any orphaned tasks from consuming + /// resources. 
Deregistering a container instance removes the instance from a cluster, but it doesn't + /// terminate the EC2 instance. If you are finished using the instance, be sure to terminate + /// it in the Amazon EC2 console to stop billing. If you terminate a running container instance, Amazon ECS automatically deregisters the + /// instance from your cluster (stopped container instances or instances with + /// disconnected agents aren't automatically deregistered when terminated). /// /// Parameters: - /// - cluster: The short name or full Amazon Resource Name (ARN) of the cluster that hosts the container instance to deregister. - /// - containerInstance: The container instance ID or full ARN of the container instance to deregister. For more information - /// - force: Forces the container instance to be deregistered. If you have tasks running on the container instance + /// - cluster: The short name or full Amazon Resource Name (ARN) of the cluster that hosts the container instance to + /// - containerInstance: The container instance ID or full ARN of the container instance to deregister. For + /// - force: Forces the container instance to be deregistered. If you have tasks running on the /// - logger: Logger use during operation @inlinable public func deregisterContainerInstance( @@ -835,18 +864,19 @@ public struct ECS: AWSService { return try await self.deregisterContainerInstance(input, logger: logger) } - /// Deregisters the specified task definition by family and revision. Upon deregistration, the task - /// definition is marked as INACTIVE. Existing tasks and services that reference an - /// INACTIVE task definition continue to run without disruption. Existing services that - /// reference an INACTIVE task definition can still scale up or down by modifying the - /// service's desired count. If you want to delete a task definition revision, you must first deregister - /// the task definition revision. 
You can't use an INACTIVE task definition to run new tasks or create new services, and - /// you can't update an existing service to reference an INACTIVE task definition. However, - /// there may be up to a 10-minute window following deregistration where these restrictions have not yet - /// taken effect. At this time, INACTIVE task definitions remain discoverable in your account - /// indefinitely. However, this behavior is subject to change in the future. We don't recommend that - /// you rely on INACTIVE task definitions persisting beyond the lifecycle of any - /// associated tasks and services. You must deregister a task definition revision before you delete it. For more information, see DeleteTaskDefinitions. + /// Deregisters the specified task definition by family and revision. Upon deregistration, + /// the task definition is marked as INACTIVE. Existing tasks and services that + /// reference an INACTIVE task definition continue to run without disruption. + /// Existing services that reference an INACTIVE task definition can still + /// scale up or down by modifying the service's desired count. If you want to delete a task + /// definition revision, you must first deregister the task definition revision. You can't use an INACTIVE task definition to run new tasks or create new + /// services, and you can't update an existing service to reference an INACTIVE + /// task definition. However, there may be up to a 10-minute window following deregistration + /// where these restrictions have not yet taken effect. At this time, INACTIVE task definitions remain discoverable in your + /// account indefinitely. However, this behavior is subject to change in the future. We + /// don't recommend that you rely on INACTIVE task definitions persisting + /// beyond the lifecycle of any associated tasks and services. You must deregister a task definition revision before you delete it. For more + /// information, see DeleteTaskDefinitions. 
@Sendable @inlinable public func deregisterTaskDefinition(_ input: DeregisterTaskDefinitionRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DeregisterTaskDefinitionResponse { @@ -859,21 +889,22 @@ public struct ECS: AWSService { logger: logger ) } - /// Deregisters the specified task definition by family and revision. Upon deregistration, the task - /// definition is marked as INACTIVE. Existing tasks and services that reference an - /// INACTIVE task definition continue to run without disruption. Existing services that - /// reference an INACTIVE task definition can still scale up or down by modifying the - /// service's desired count. If you want to delete a task definition revision, you must first deregister - /// the task definition revision. You can't use an INACTIVE task definition to run new tasks or create new services, and - /// you can't update an existing service to reference an INACTIVE task definition. However, - /// there may be up to a 10-minute window following deregistration where these restrictions have not yet - /// taken effect. At this time, INACTIVE task definitions remain discoverable in your account - /// indefinitely. However, this behavior is subject to change in the future. We don't recommend that - /// you rely on INACTIVE task definitions persisting beyond the lifecycle of any - /// associated tasks and services. You must deregister a task definition revision before you delete it. For more information, see DeleteTaskDefinitions. + /// Deregisters the specified task definition by family and revision. Upon deregistration, + /// the task definition is marked as INACTIVE. Existing tasks and services that + /// reference an INACTIVE task definition continue to run without disruption. + /// Existing services that reference an INACTIVE task definition can still + /// scale up or down by modifying the service's desired count. 
If you want to delete a task + /// definition revision, you must first deregister the task definition revision. You can't use an INACTIVE task definition to run new tasks or create new + /// services, and you can't update an existing service to reference an INACTIVE + /// task definition. However, there may be up to a 10-minute window following deregistration + /// where these restrictions have not yet taken effect. At this time, INACTIVE task definitions remain discoverable in your + /// account indefinitely. However, this behavior is subject to change in the future. We + /// don't recommend that you rely on INACTIVE task definitions persisting + /// beyond the lifecycle of any associated tasks and services. You must deregister a task definition revision before you delete it. For more + /// information, see DeleteTaskDefinitions. /// /// Parameters: - /// - taskDefinition: The family and revision (family:revision) or full Amazon Resource Name (ARN) of + /// - taskDefinition: The family and revision (family:revision) or /// - logger: Logger use during operation @inlinable public func deregisterTaskDefinition( @@ -902,9 +933,9 @@ public struct ECS: AWSService { /// Describes one or more of your capacity providers. /// /// Parameters: - /// - capacityProviders: The short name or full Amazon Resource Name (ARN) of one or more capacity providers. Up to 100 capacity - /// - include: Specifies whether or not you want to see the resource tags for the capacity provider. If - /// - maxResults: The maximum number of account setting results returned by DescribeCapacityProviders in + /// - capacityProviders: The short name or full Amazon Resource Name (ARN) of one or more capacity providers. Up to + /// - include: Specifies whether or not you want to see the resource tags for the capacity provider. 
+ /// - maxResults: The maximum number of account setting results returned by /// - nextToken: The nextToken value returned from a previous paginated /// - logger: Logger use during operation @inlinable @@ -940,8 +971,8 @@ public struct ECS: AWSService { /// Describes one or more of your clusters. For CLI examples, see describe-clusters.rst on GitHub. /// /// Parameters: - /// - clusters: A list of up to 100 cluster names or full cluster Amazon Resource Name (ARN) entries. If you do not specify a cluster, the default cluster is assumed. - /// - include: Determines whether to include additional information about the clusters in the response. If this + /// - clusters: A list of up to 100 cluster names or full cluster Amazon Resource Name (ARN) entries. + /// - include: Determines whether to include additional information about the clusters in the /// - logger: Logger use during operation @inlinable public func describeClusters( @@ -956,8 +987,8 @@ public struct ECS: AWSService { return try await self.describeClusters(input, logger: logger) } - /// Describes one or more container instances. Returns metadata about each container instance - /// requested. + /// Describes one or more container instances. Returns metadata about each container + /// instance requested. @Sendable @inlinable public func describeContainerInstances(_ input: DescribeContainerInstancesRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DescribeContainerInstancesResponse { @@ -970,13 +1001,13 @@ public struct ECS: AWSService { logger: logger ) } - /// Describes one or more container instances. Returns metadata about each container instance - /// requested. + /// Describes one or more container instances. Returns metadata about each container + /// instance requested. /// /// Parameters: - /// - cluster: The short name or full Amazon Resource Name (ARN) of the cluster that hosts the container instances to describe. 
+ /// - cluster: The short name or full Amazon Resource Name (ARN) of the cluster that hosts the container instances to /// - containerInstances: A list of up to 100 container instance IDs or full Amazon Resource Name (ARN) entries. - /// - include: Specifies whether you want to see the resource tags for the container instance. If TAGS + /// - include: Specifies whether you want to see the resource tags for the container instance. If /// - logger: Logger use during operation @inlinable public func describeContainerInstances( @@ -993,7 +1024,9 @@ public struct ECS: AWSService { return try await self.describeContainerInstances(input, logger: logger) } - /// Describes one or more of your service deployments. A service deployment happens when you release a software update for the service. For more information, see Amazon ECS service deployments. + /// Describes one or more of your service deployments. A service deployment happens when you release a software update for the service. For + /// more information, see Amazon ECS service + /// deployments. @Sendable @inlinable public func describeServiceDeployments(_ input: DescribeServiceDeploymentsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DescribeServiceDeploymentsResponse { @@ -1006,7 +1039,9 @@ public struct ECS: AWSService { logger: logger ) } - /// Describes one or more of your service deployments. A service deployment happens when you release a software update for the service. For more information, see Amazon ECS service deployments. + /// Describes one or more of your service deployments. A service deployment happens when you release a software update for the service. For + /// more information, see Amazon ECS service + /// deployments. /// /// Parameters: /// - serviceDeploymentArns: The ARN of the service deployment. You can specify a maximum of 20 ARNs. 
@@ -1022,8 +1057,8 @@ public struct ECS: AWSService { return try await self.describeServiceDeployments(input, logger: logger) } - /// Describes one or more service revisions. A service revision is a version of the service that includes the values for the Amazon ECS - /// resources (for example, task definition) and the environment resources (for example, + /// Describes one or more service revisions. A service revision is a version of the service that includes the values for the Amazon + /// ECS resources (for example, task definition) and the environment resources (for example, /// load balancers, subnets, and security groups). For more information, see Amazon ECS service revisions. You can't describe a service revision that was created before October 25, 2024. @Sendable @inlinable @@ -1037,12 +1072,12 @@ public struct ECS: AWSService { logger: logger ) } - /// Describes one or more service revisions. A service revision is a version of the service that includes the values for the Amazon ECS - /// resources (for example, task definition) and the environment resources (for example, + /// Describes one or more service revisions. A service revision is a version of the service that includes the values for the Amazon + /// ECS resources (for example, task definition) and the environment resources (for example, /// load balancers, subnets, and security groups). For more information, see Amazon ECS service revisions. You can't describe a service revision that was created before October 25, 2024. /// /// Parameters: - /// - serviceRevisionArns: The ARN of the service revision. You can specify a maximum of 20 ARNs. You can call ListServiceDeployments to + /// - serviceRevisionArns: The ARN of the service revision. You can specify a maximum of 20 ARNs. You can call ListServiceDeployments to get the ARNs. 
/// - logger: Logger use during operation @inlinable public func describeServiceRevisions( @@ -1072,8 +1107,8 @@ public struct ECS: AWSService { /// /// Parameters: /// - cluster: The short name or full Amazon Resource Name (ARN)the cluster that hosts the service to describe. - /// - include: Determines whether you want to see the resource tags for the service. If TAGS is - /// - services: A list of services to describe. You may specify up to 10 services to describe in a single + /// - include: Determines whether you want to see the resource tags for the service. If + /// - services: A list of services to describe. You may specify up to 10 services to describe in a /// - logger: Logger use during operation @inlinable public func describeServices( @@ -1090,10 +1125,11 @@ public struct ECS: AWSService { return try await self.describeServices(input, logger: logger) } - /// Describes a task definition. You can specify a family and revision to find - /// information about a specific task definition, or you can simply specify the family to find the latest - /// ACTIVE revision in that family. You can only describe INACTIVE task definitions while an active task or service - /// references them. + /// Describes a task definition. You can specify a family and + /// revision to find information about a specific task definition, or you + /// can simply specify the family to find the latest ACTIVE revision in that + /// family. You can only describe INACTIVE task definitions while an active task + /// or service references them. @Sendable @inlinable public func describeTaskDefinition(_ input: DescribeTaskDefinitionRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DescribeTaskDefinitionResponse { @@ -1106,14 +1142,15 @@ public struct ECS: AWSService { logger: logger ) } - /// Describes a task definition. 
You can specify a family and revision to find - /// information about a specific task definition, or you can simply specify the family to find the latest - /// ACTIVE revision in that family. You can only describe INACTIVE task definitions while an active task or service - /// references them. + /// Describes a task definition. You can specify a family and + /// revision to find information about a specific task definition, or you + /// can simply specify the family to find the latest ACTIVE revision in that + /// family. You can only describe INACTIVE task definitions while an active task + /// or service references them. /// /// Parameters: - /// - include: Determines whether to see the resource tags for the task definition. If TAGS is - /// - taskDefinition: The family for the latest ACTIVE revision, family and + /// - include: Determines whether to see the resource tags for the task definition. If + /// - taskDefinition: The family for the latest ACTIVE revision, /// - logger: Logger use during operation @inlinable public func describeTaskDefinition( @@ -1128,8 +1165,9 @@ public struct ECS: AWSService { return try await self.describeTaskDefinition(input, logger: logger) } - /// Describes the task sets in the specified cluster and service. This is used when a service uses the - /// EXTERNAL deployment controller type. For more information, see Amazon ECS Deployment + /// Describes the task sets in the specified cluster and service. This is used when a + /// service uses the EXTERNAL deployment controller type. For more information, + /// see Amazon ECS Deployment /// Types in the Amazon Elastic Container Service Developer Guide. @Sendable @inlinable @@ -1143,15 +1181,16 @@ public struct ECS: AWSService { logger: logger ) } - /// Describes the task sets in the specified cluster and service. This is used when a service uses the - /// EXTERNAL deployment controller type. 
For more information, see Amazon ECS Deployment + /// Describes the task sets in the specified cluster and service. This is used when a + /// service uses the EXTERNAL deployment controller type. For more information, + /// see Amazon ECS Deployment /// Types in the Amazon Elastic Container Service Developer Guide. /// /// Parameters: - /// - cluster: The short name or full Amazon Resource Name (ARN) of the cluster that hosts the service that the task sets exist - /// - include: Specifies whether to see the resource tags for the task set. If TAGS is specified, the + /// - cluster: The short name or full Amazon Resource Name (ARN) of the cluster that hosts the service that the task + /// - include: Specifies whether to see the resource tags for the task set. If TAGS is /// - service: The short name or full Amazon Resource Name (ARN) of the service that the task sets exist in. - /// - taskSets: The ID or full Amazon Resource Name (ARN) of task sets to describe. + /// - taskSets: The ID or full Amazon Resource Name (ARN) of task sets to /// - logger: Logger use during operation @inlinable public func describeTaskSets( @@ -1170,9 +1209,9 @@ public struct ECS: AWSService { return try await self.describeTaskSets(input, logger: logger) } - /// Describes a specified task or tasks. Currently, stopped tasks appear in the returned results for at least one hour. If you have tasks with tags, and then delete the cluster, the tagged tasks are returned in the - /// response. If you create a new cluster with the same name as the deleted cluster, the tagged tasks are - /// not included in the response. + /// Describes a specified task or tasks. Currently, stopped tasks appear in the returned results for at least one hour. If you have tasks with tags, and then delete the cluster, the tagged tasks are + /// returned in the response. If you create a new cluster with the same name as the deleted + /// cluster, the tagged tasks are not included in the response. 
@Sendable @inlinable public func describeTasks(_ input: DescribeTasksRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DescribeTasksResponse { @@ -1185,13 +1224,13 @@ public struct ECS: AWSService { logger: logger ) } - /// Describes a specified task or tasks. Currently, stopped tasks appear in the returned results for at least one hour. If you have tasks with tags, and then delete the cluster, the tagged tasks are returned in the - /// response. If you create a new cluster with the same name as the deleted cluster, the tagged tasks are - /// not included in the response. + /// Describes a specified task or tasks. Currently, stopped tasks appear in the returned results for at least one hour. If you have tasks with tags, and then delete the cluster, the tagged tasks are + /// returned in the response. If you create a new cluster with the same name as the deleted + /// cluster, the tagged tasks are not included in the response. /// /// Parameters: - /// - cluster: The short name or full Amazon Resource Name (ARN) of the cluster that hosts the task or tasks to describe. - /// - include: Specifies whether you want to see the resource tags for the task. If TAGS is specified, + /// - cluster: The short name or full Amazon Resource Name (ARN) of the cluster that hosts the task or tasks to + /// - include: Specifies whether you want to see the resource tags for the task. If TAGS /// - tasks: A list of up to 100 task IDs or full ARN entries. /// - logger: Logger use during operation @inlinable @@ -1225,8 +1264,8 @@ public struct ECS: AWSService { /// This action is only used by the Amazon ECS agent, and it is not intended for use outside of the agent. Returns an endpoint for the Amazon ECS agent to poll for updates. /// /// Parameters: - /// - cluster: The short name or full Amazon Resource Name (ARN) of the cluster that the container instance belongs to. - /// - containerInstance: The container instance ID or full ARN of the container instance. 
For more information about the + /// - cluster: The short name or full Amazon Resource Name (ARN) of the cluster that the container instance belongs + /// - containerInstance: The container instance ID or full ARN of the container instance. For more /// - logger: Logger use during operation @inlinable public func discoverPollEndpoint( @@ -1241,10 +1280,12 @@ public struct ECS: AWSService { return try await self.discoverPollEndpoint(input, logger: logger) } - /// Runs a command remotely on a container within a task. If you use a condition key in your IAM policy to refine the conditions for the policy statement, - /// for example limit the actions to a specific cluster, you receive an AccessDeniedException - /// when there is a mismatch between the condition key value and the corresponding parameter value. For information about required permissions and considerations, see Using Amazon ECS Exec for debugging in the - /// Amazon ECS Developer Guide. + /// Runs a command remotely on a container within a task. If you use a condition key in your IAM policy to refine the conditions for the + /// policy statement, for example limit the actions to a specific cluster, you receive an + /// AccessDeniedException when there is a mismatch between the condition + /// key value and the corresponding parameter value. For information about required permissions and considerations, see Using Amazon ECS + /// Exec for debugging in the Amazon ECS Developer Guide. + /// @Sendable @inlinable public func executeCommand(_ input: ExecuteCommandRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ExecuteCommandResponse { @@ -1257,15 +1298,17 @@ public struct ECS: AWSService { logger: logger ) } - /// Runs a command remotely on a container within a task. 
If you use a condition key in your IAM policy to refine the conditions for the policy statement, - /// for example limit the actions to a specific cluster, you receive an AccessDeniedException - /// when there is a mismatch between the condition key value and the corresponding parameter value. For information about required permissions and considerations, see Using Amazon ECS Exec for debugging in the - /// Amazon ECS Developer Guide. + /// Runs a command remotely on a container within a task. If you use a condition key in your IAM policy to refine the conditions for the + /// policy statement, for example limit the actions to a specific cluster, you receive an + /// AccessDeniedException when there is a mismatch between the condition + /// key value and the corresponding parameter value. For information about required permissions and considerations, see Using Amazon ECS + /// Exec for debugging in the Amazon ECS Developer Guide. + /// /// /// Parameters: /// - cluster: The Amazon Resource Name (ARN) or short name of the cluster the task is running in. /// - command: The command to run on the container. - /// - container: The name of the container to execute the command on. A container name only needs to be specified for + /// - container: The name of the container to execute the command on. A container name only needs to be /// - interactive: Use this flag to run your command in interactive mode. /// - task: The Amazon Resource Name (ARN) or ID of the task the container is part of. /// - logger: Logger use during operation @@ -1304,7 +1347,7 @@ public struct ECS: AWSService { /// Retrieves the protection status of tasks in an Amazon ECS service. 
/// /// Parameters: - /// - cluster: The short name or full Amazon Resource Name (ARN) of the cluster that hosts the service that the task sets exist + /// - cluster: The short name or full Amazon Resource Name (ARN) of the cluster that hosts the service that the task /// - tasks: A list of up to 100 task IDs or full ARN entries. /// - logger: Logger use during operation @inlinable @@ -1336,12 +1379,12 @@ public struct ECS: AWSService { /// Lists the account settings for a specified principal. /// /// Parameters: - /// - effectiveSettings: Determines whether to return the effective settings. If true, the account settings for - /// - maxResults: The maximum number of account setting results returned by ListAccountSettings in + /// - effectiveSettings: Determines whether to return the effective settings. If true, the account + /// - maxResults: The maximum number of account setting results returned by /// - name: The name of the account setting you want to list the settings for. - /// - nextToken: The nextToken value returned from a ListAccountSettings request indicating - /// - principalArn: The ARN of the principal, which can be a user, role, or the root user. If this field is omitted, the - /// - value: The value of the account settings to filter results with. You must also specify an account setting + /// - nextToken: The nextToken value returned from a ListAccountSettings + /// - principalArn: The ARN of the principal, which can be a user, role, or the root user. If this field is + /// - value: The value of the account settings to filter results with. You must also specify an /// - logger: Logger use during operation @inlinable public func listAccountSettings( @@ -1364,11 +1407,12 @@ public struct ECS: AWSService { return try await self.listAccountSettings(input, logger: logger) } - /// Lists the attributes for Amazon ECS resources within a specified target type and cluster. 
When you specify - /// a target type and cluster, ListAttributes returns a list of attribute objects, one for - /// each attribute on each resource. You can filter the list of results to a single attribute name to only - /// return results that have that name. You can also filter the results by attribute name and value. You - /// can do this, for example, to see which container instances in a cluster are running a Linux AMI + /// Lists the attributes for Amazon ECS resources within a specified target type and cluster. + /// When you specify a target type and cluster, ListAttributes returns a list + /// of attribute objects, one for each attribute on each resource. You can filter the list + /// of results to a single attribute name to only return results that have that name. You + /// can also filter the results by attribute name and value. You can do this, for example, + /// to see which container instances in a cluster are running a Linux AMI /// (ecs.os-type=linux). @Sendable @inlinable @@ -1382,19 +1426,20 @@ public struct ECS: AWSService { logger: logger ) } - /// Lists the attributes for Amazon ECS resources within a specified target type and cluster. When you specify - /// a target type and cluster, ListAttributes returns a list of attribute objects, one for - /// each attribute on each resource. You can filter the list of results to a single attribute name to only - /// return results that have that name. You can also filter the results by attribute name and value. You - /// can do this, for example, to see which container instances in a cluster are running a Linux AMI + /// Lists the attributes for Amazon ECS resources within a specified target type and cluster. + /// When you specify a target type and cluster, ListAttributes returns a list + /// of attribute objects, one for each attribute on each resource. You can filter the list + /// of results to a single attribute name to only return results that have that name. 
You + /// can also filter the results by attribute name and value. You can do this, for example, + /// to see which container instances in a cluster are running a Linux AMI /// (ecs.os-type=linux). /// /// Parameters: /// - attributeName: The name of the attribute to filter the results with. - /// - attributeValue: The value of the attribute to filter results with. You must also specify an attribute name to use - /// - cluster: The short name or full Amazon Resource Name (ARN) of the cluster to list attributes. If you do not specify a cluster, the default cluster is assumed. - /// - maxResults: The maximum number of cluster results that ListAttributes returned in paginated output. - /// - nextToken: The nextToken value returned from a ListAttributes request indicating that + /// - attributeValue: The value of the attribute to filter results with. You must also specify an attribute + /// - cluster: The short name or full Amazon Resource Name (ARN) of the cluster to list attributes. + /// - maxResults: The maximum number of cluster results that ListAttributes returned in + /// - nextToken: The nextToken value returned from a ListAttributes request /// - targetType: The type of the target to list attributes with. /// - logger: Logger use during operation @inlinable @@ -1434,8 +1479,8 @@ public struct ECS: AWSService { /// Returns a list of existing clusters. /// /// Parameters: - /// - maxResults: The maximum number of cluster results that ListClusters returned in paginated output. 
- /// - nextToken: The nextToken value returned from a ListClusters request indicating that + /// - maxResults: The maximum number of cluster results that ListClusters returned in + /// - nextToken: The nextToken value returned from a ListClusters request /// - logger: Logger use during operation @inlinable public func listClusters( @@ -1450,10 +1495,9 @@ public struct ECS: AWSService { return try await self.listClusters(input, logger: logger) } - /// Returns a list of container instances in a specified cluster. You can filter the results of a - /// ListContainerInstances operation with cluster query language statements inside the - /// filter parameter. For more information, see Cluster - /// Query Language in the Amazon Elastic Container Service Developer Guide. + /// Returns a list of container instances in a specified cluster. You can filter the + /// results of a ListContainerInstances operation with cluster query language + /// statements inside the filter parameter. For more information, see Cluster Query Language in the Amazon Elastic Container Service Developer Guide. @Sendable @inlinable public func listContainerInstances(_ input: ListContainerInstancesRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListContainerInstancesResponse { @@ -1466,17 +1510,16 @@ public struct ECS: AWSService { logger: logger ) } - /// Returns a list of container instances in a specified cluster. You can filter the results of a - /// ListContainerInstances operation with cluster query language statements inside the - /// filter parameter. For more information, see Cluster - /// Query Language in the Amazon Elastic Container Service Developer Guide. + /// Returns a list of container instances in a specified cluster. You can filter the + /// results of a ListContainerInstances operation with cluster query language + /// statements inside the filter parameter. 
For more information, see Cluster Query Language in the Amazon Elastic Container Service Developer Guide. /// /// Parameters: - /// - cluster: The short name or full Amazon Resource Name (ARN) of the cluster that hosts the container instances to list. - /// - filter: You can filter the results of a ListContainerInstances operation with cluster query - /// - maxResults: The maximum number of container instance results that ListContainerInstances returned in - /// - nextToken: The nextToken value returned from a ListContainerInstances request - /// - status: Filters the container instances by status. For example, if you specify the DRAINING + /// - cluster: The short name or full Amazon Resource Name (ARN) of the cluster that hosts the container instances to + /// - filter: You can filter the results of a ListContainerInstances operation with + /// - maxResults: The maximum number of container instance results that + /// - nextToken: The nextToken value returned from a ListContainerInstances + /// - status: Filters the container instances by status. For example, if you specify the /// - logger: Logger use during operation @inlinable public func listContainerInstances( @@ -1497,7 +1540,8 @@ public struct ECS: AWSService { return try await self.listContainerInstances(input, logger: logger) } - /// This operation lists all the service deployments that meet the specified filter criteria. A service deployment happens when you release a softwre update for the service. You + /// This operation lists all the service deployments that meet the specified filter + /// criteria. A service deployment happens when you release a software update for the service. You /// route traffic from the running service revisions to the new service revison and control /// the number of running tasks. This API returns the values that you use for the request parameters in DescribeServiceRevisions. 
@Sendable @@ -1512,17 +1556,18 @@ public struct ECS: AWSService { logger: logger ) } - /// This operation lists all the service deployments that meet the specified filter criteria. A service deployment happens when you release a softwre update for the service. You + /// This operation lists all the service deployments that meet the specified filter + /// criteria. A service deployment happens when you release a software update for the service. You /// route traffic from the running service revisions to the new service revison and control /// the number of running tasks. This API returns the values that you use for the request parameters in DescribeServiceRevisions. /// /// Parameters: - /// - cluster: The cluster that hosts the service. This can either be the cluster name or ARN. Starting - /// - createdAt: An optional filter you can use to narrow the results by the service creation date. If you do - /// - maxResults: The maximum number of service deployment results that ListServiceDeployments - /// - nextToken: The nextToken value returned from a ListServiceDeployments request indicating that more results are available to fulfill the request and further calls are needed. If you provided maxResults, it's possible the number of results is fewer than maxResults. + /// - cluster: The cluster that hosts the service. This can either be the cluster name or ARN. + /// - createdAt: An optional filter you can use to narrow the results by the service creation date. If + /// - maxResults: The maximum number of service deployment results that + /// - nextToken: The nextToken value returned from a ListServiceDeployments /// - service: The ARN or name of the service - /// - status: An optional filter you can use to narrow the results. 
If you do not specify a status, /// - logger: Logger use during operation @inlinable public func listServiceDeployments( @@ -1545,8 +1590,8 @@ public struct ECS: AWSService { return try await self.listServiceDeployments(input, logger: logger) } - /// Returns a list of services. You can filter the results by cluster, launch type, and scheduling - /// strategy. + /// Returns a list of services. You can filter the results by cluster, launch type, and + /// scheduling strategy. @Sendable @inlinable public func listServices(_ input: ListServicesRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListServicesResponse { @@ -1559,15 +1604,15 @@ public struct ECS: AWSService { logger: logger ) } - /// Returns a list of services. You can filter the results by cluster, launch type, and scheduling - /// strategy. + /// Returns a list of services. You can filter the results by cluster, launch type, and + /// scheduling strategy. /// /// Parameters: - /// - cluster: The short name or full Amazon Resource Name (ARN) of the cluster to use when filtering the ListServices + /// - cluster: The short name or full Amazon Resource Name (ARN) of the cluster to use when filtering the /// - launchType: The launch type to use when filtering the ListServices results. - /// - maxResults: The maximum number of service results that ListServices returned in paginated output. - /// - nextToken: The nextToken value returned from a ListServices request indicating that - /// - schedulingStrategy: The scheduling strategy to use when filtering the ListServices results. 
+ /// - maxResults: The maximum number of service results that ListServices returned in + /// - nextToken: The nextToken value returned from a ListServices request + /// - schedulingStrategy: The scheduling strategy to use when filtering the ListServices /// - logger: Logger use during operation @inlinable public func listServices( @@ -1588,10 +1633,11 @@ public struct ECS: AWSService { return try await self.listServices(input, logger: logger) } - /// This operation lists all of the services that are associated with a Cloud Map namespace. This list - /// might include services in different clusters. In contrast, ListServices can only list - /// services in one cluster at a time. If you need to filter the list of services in a single cluster by - /// various parameters, use ListServices. For more information, see Service Connect in the Amazon Elastic Container Service Developer Guide. + /// This operation lists all of the services that are associated with a Cloud Map + /// namespace. This list might include services in different clusters. In contrast, + /// ListServices can only list services in one cluster at a time. If you + /// need to filter the list of services in a single cluster by various parameters, use + /// ListServices. For more information, see Service Connect in the Amazon Elastic Container Service Developer Guide. @Sendable @inlinable public func listServicesByNamespace(_ input: ListServicesByNamespaceRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListServicesByNamespaceResponse { @@ -1604,15 +1650,16 @@ public struct ECS: AWSService { logger: logger ) } - /// This operation lists all of the services that are associated with a Cloud Map namespace. This list - /// might include services in different clusters. In contrast, ListServices can only list - /// services in one cluster at a time. If you need to filter the list of services in a single cluster by - /// various parameters, use ListServices. 
For more information, see Service Connect in the Amazon Elastic Container Service Developer Guide. + /// This operation lists all of the services that are associated with a Cloud Map + /// namespace. This list might include services in different clusters. In contrast, + /// ListServices can only list services in one cluster at a time. If you + /// need to filter the list of services in a single cluster by various parameters, use + /// ListServices. For more information, see Service Connect in the Amazon Elastic Container Service Developer Guide. /// /// Parameters: - /// - maxResults: The maximum number of service results that ListServicesByNamespace returns in paginated + /// - maxResults: The maximum number of service results that ListServicesByNamespace /// - namespace: The namespace name or full Amazon Resource Name (ARN) of the Cloud Map namespace to list the services in. Tasks that run in a namespace can use short names to connect - /// - nextToken: The nextToken value that's returned from a ListServicesByNamespace request. + /// - nextToken: The nextToken value that's returned from a /// - logger: Logger use during operation @inlinable public func listServicesByNamespace( @@ -1645,7 +1692,7 @@ public struct ECS: AWSService { /// List the tags for an Amazon ECS resource. /// /// Parameters: - /// - resourceArn: The Amazon Resource Name (ARN) that identifies the resource to list the tags for. Currently, the supported resources + /// - resourceArn: The Amazon Resource Name (ARN) that identifies the resource to list the tags for. Currently, the /// - logger: Logger use during operation @inlinable public func listTagsForResource( @@ -1658,10 +1705,12 @@ public struct ECS: AWSService { return try await self.listTagsForResource(input, logger: logger) } - /// Returns a list of task definition families that are registered to your account. This list includes - /// task definition families that no longer have any ACTIVE task definition revisions. 
You can filter out task definition families that don't contain any ACTIVE task - /// definition revisions by setting the status parameter to ACTIVE. You can also - /// filter the results with the familyPrefix parameter. + /// Returns a list of task definition families that are registered to your account. This + /// list includes task definition families that no longer have any ACTIVE task + /// definition revisions. You can filter out task definition families that don't contain any ACTIVE + /// task definition revisions by setting the status parameter to + /// ACTIVE. You can also filter the results with the + /// familyPrefix parameter. @Sendable @inlinable public func listTaskDefinitionFamilies(_ input: ListTaskDefinitionFamiliesRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListTaskDefinitionFamiliesResponse { @@ -1674,16 +1723,18 @@ public struct ECS: AWSService { logger: logger ) } - /// Returns a list of task definition families that are registered to your account. This list includes - /// task definition families that no longer have any ACTIVE task definition revisions. You can filter out task definition families that don't contain any ACTIVE task - /// definition revisions by setting the status parameter to ACTIVE. You can also - /// filter the results with the familyPrefix parameter. + /// Returns a list of task definition families that are registered to your account. This + /// list includes task definition families that no longer have any ACTIVE task + /// definition revisions. You can filter out task definition families that don't contain any ACTIVE + /// task definition revisions by setting the status parameter to + /// ACTIVE. You can also filter the results with the + /// familyPrefix parameter. 
/// /// Parameters: /// - familyPrefix: The familyPrefix is a string that's used to filter the results of - /// - maxResults: The maximum number of task definition family results that ListTaskDefinitionFamilies - /// - nextToken: The nextToken value returned from a ListTaskDefinitionFamilies request - /// - status: The task definition family status to filter the ListTaskDefinitionFamilies results with. + /// - maxResults: The maximum number of task definition family results that + /// - nextToken: The nextToken value returned from a + /// - status: The task definition family status to filter the /// - logger: Logger use during operation @inlinable public func listTaskDefinitionFamilies( @@ -1702,9 +1753,9 @@ public struct ECS: AWSService { return try await self.listTaskDefinitionFamilies(input, logger: logger) } - /// Returns a list of task definitions that are registered to your account. You can filter the results by - /// family name with the familyPrefix parameter or by status with the status - /// parameter. + /// Returns a list of task definitions that are registered to your account. You can filter + /// the results by family name with the familyPrefix parameter or by status + /// with the status parameter. @Sendable @inlinable public func listTaskDefinitions(_ input: ListTaskDefinitionsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListTaskDefinitionsResponse { @@ -1717,16 +1768,16 @@ public struct ECS: AWSService { logger: logger ) } - /// Returns a list of task definitions that are registered to your account. You can filter the results by - /// family name with the familyPrefix parameter or by status with the status - /// parameter. + /// Returns a list of task definitions that are registered to your account. You can filter + /// the results by family name with the familyPrefix parameter or by status + /// with the status parameter. 
/// /// Parameters: - /// - familyPrefix: The full family name to filter the ListTaskDefinitions results with. Specifying a - /// - maxResults: The maximum number of task definition results that ListTaskDefinitions returned in - /// - nextToken: The nextToken value returned from a ListTaskDefinitions request indicating - /// - sort: The order to sort the results in. Valid values are ASC and DESC. By - /// - status: The task definition status to filter the ListTaskDefinitions results with. By default, + /// - familyPrefix: The full family name to filter the ListTaskDefinitions results with. + /// - maxResults: The maximum number of task definition results that ListTaskDefinitions + /// - nextToken: The nextToken value returned from a ListTaskDefinitions + /// - sort: The order to sort the results in. Valid values are ASC and + /// - status: The task definition status to filter the ListTaskDefinitions results /// - logger: Logger use during operation @inlinable public func listTaskDefinitions( @@ -1747,9 +1798,9 @@ public struct ECS: AWSService { return try await self.listTaskDefinitions(input, logger: logger) } - /// Returns a list of tasks. You can filter the results by cluster, task definition family, container - /// instance, launch type, what IAM principal started the task, or by the desired status of the - /// task. Recently stopped tasks might appear in the returned results. + /// Returns a list of tasks. You can filter the results by cluster, task definition + /// family, container instance, launch type, what IAM principal started the task, or by + /// the desired status of the task. Recently stopped tasks might appear in the returned results. @Sendable @inlinable public func listTasks(_ input: ListTasksRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListTasksResponse { @@ -1762,20 +1813,20 @@ public struct ECS: AWSService { logger: logger ) } - /// Returns a list of tasks. 
You can filter the results by cluster, task definition family, container - /// instance, launch type, what IAM principal started the task, or by the desired status of the - /// task. Recently stopped tasks might appear in the returned results. + /// Returns a list of tasks. You can filter the results by cluster, task definition + /// family, container instance, launch type, what IAM principal started the task, or by + /// the desired status of the task. Recently stopped tasks might appear in the returned results. /// /// Parameters: - /// - cluster: The short name or full Amazon Resource Name (ARN) of the cluster to use when filtering the ListTasks - /// - containerInstance: The container instance ID or full ARN of the container instance to use when filtering the - /// - desiredStatus: The task desired status to use when filtering the ListTasks results. Specifying a - /// - family: The name of the task definition family to use when filtering the ListTasks results. + /// - cluster: The short name or full Amazon Resource Name (ARN) of the cluster to use when filtering the + /// - containerInstance: The container instance ID or full ARN of the container instance to use when + /// - desiredStatus: The task desired status to use when filtering the ListTasks results. + /// - family: The name of the task definition family to use when filtering the /// - launchType: The launch type to use when filtering the ListTasks results. - /// - maxResults: The maximum number of task results that ListTasks returned in paginated output. When - /// - nextToken: The nextToken value returned from a ListTasks request indicating that more - /// - serviceName: The name of the service to use when filtering the ListTasks results. Specifying a - /// - startedBy: The startedBy value to filter the task results with. 
Specifying a startedBy + /// - maxResults: The maximum number of task results that ListTasks returned in paginated + /// - nextToken: The nextToken value returned from a ListTasks request + /// - serviceName: The name of the service to use when filtering the ListTasks results. + /// - startedBy: The startedBy value to filter the task results with. Specifying a /// - logger: Logger use during operation @inlinable public func listTasks( @@ -1804,9 +1855,10 @@ public struct ECS: AWSService { return try await self.listTasks(input, logger: logger) } - /// Modifies an account setting. Account settings are set on a per-Region basis. If you change the root user account setting, the default settings are reset for users and roles that do - /// not have specified individual account settings. For more information, see Account Settings in the - /// Amazon Elastic Container Service Developer Guide. + /// Modifies an account setting. Account settings are set on a per-Region basis. If you change the root user account setting, the default settings are reset for users and + /// roles that do not have specified individual account settings. For more information, see + /// Account + /// Settings in the Amazon Elastic Container Service Developer Guide. @Sendable @inlinable public func putAccountSetting(_ input: PutAccountSettingRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> PutAccountSettingResponse { @@ -1819,13 +1871,14 @@ public struct ECS: AWSService { logger: logger ) } - /// Modifies an account setting. Account settings are set on a per-Region basis. If you change the root user account setting, the default settings are reset for users and roles that do - /// not have specified individual account settings. For more information, see Account Settings in the - /// Amazon Elastic Container Service Developer Guide. + /// Modifies an account setting. Account settings are set on a per-Region basis. 
If you change the root user account setting, the default settings are reset for users and + /// roles that do not have specified individual account settings. For more information, see + /// Account + /// Settings in the Amazon Elastic Container Service Developer Guide. /// /// Parameters: - /// - name: The Amazon ECS account setting name to modify. The following are the valid values for the account setting name. serviceLongArnFormat - When modified, the Amazon Resource Name (ARN) and - /// - principalArn: The ARN of the principal, which can be a user, role, or the root user. If you specify the root user, it + /// - name: The Amazon ECS account setting name to modify. The following are the valid values for the account setting name. serviceLongArnFormat - When modified, the Amazon Resource Name + /// - principalArn: The ARN of the principal, which can be a user, role, or the root user. If you specify /// - value: The account setting value for the specified principal ARN. Accepted values are /// - logger: Logger use during operation @inlinable @@ -1843,8 +1896,8 @@ public struct ECS: AWSService { return try await self.putAccountSetting(input, logger: logger) } - /// Modifies an account setting for all users on an account for whom no individual account setting has - /// been specified. Account settings are set on a per-Region basis. + /// Modifies an account setting for all users on an account for whom no individual account + /// setting has been specified. Account settings are set on a per-Region basis. @Sendable @inlinable public func putAccountSettingDefault(_ input: PutAccountSettingDefaultRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> PutAccountSettingDefaultResponse { @@ -1857,11 +1910,11 @@ public struct ECS: AWSService { logger: logger ) } - /// Modifies an account setting for all users on an account for whom no individual account setting has - /// been specified. Account settings are set on a per-Region basis. 
+ /// Modifies an account setting for all users on an account for whom no individual account + /// setting has been specified. Account settings are set on a per-Region basis. /// /// Parameters: - /// - name: The resource name for which to modify the account setting. The following are the valid values for the account setting name. serviceLongArnFormat - When modified, the Amazon Resource Name (ARN) and + /// - name: The resource name for which to modify the account setting. The following are the valid values for the account setting name. serviceLongArnFormat - When modified, the Amazon Resource Name /// - value: The account setting value for the specified principal ARN. Accepted values are /// - logger: Logger use during operation @inlinable @@ -1877,9 +1930,9 @@ public struct ECS: AWSService { return try await self.putAccountSettingDefault(input, logger: logger) } - /// Create or update an attribute on an Amazon ECS resource. If the attribute doesn't exist, it's created. If - /// the attribute exists, its value is replaced with the specified value. To delete an attribute, use - /// DeleteAttributes. For more information, see Attributes in the Amazon Elastic Container Service Developer Guide. + /// Create or update an attribute on an Amazon ECS resource. If the attribute doesn't exist, + /// it's created. If the attribute exists, its value is replaced with the specified value. + /// To delete an attribute, use DeleteAttributes. For more information, see Attributes in the Amazon Elastic Container Service Developer Guide. @Sendable @inlinable public func putAttributes(_ input: PutAttributesRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> PutAttributesResponse { @@ -1892,13 +1945,13 @@ public struct ECS: AWSService { logger: logger ) } - /// Create or update an attribute on an Amazon ECS resource. If the attribute doesn't exist, it's created. If - /// the attribute exists, its value is replaced with the specified value. 
To delete an attribute, use - /// DeleteAttributes. For more information, see Attributes in the Amazon Elastic Container Service Developer Guide. + /// Create or update an attribute on an Amazon ECS resource. If the attribute doesn't exist, + /// it's created. If the attribute exists, its value is replaced with the specified value. + /// To delete an attribute, use DeleteAttributes. For more information, see Attributes in the Amazon Elastic Container Service Developer Guide. /// /// Parameters: - /// - attributes: The attributes to apply to your resource. You can specify up to 10 custom attributes for each - /// - cluster: The short name or full Amazon Resource Name (ARN) of the cluster that contains the resource to apply attributes. + /// - attributes: The attributes to apply to your resource. You can specify up to 10 custom attributes + /// - cluster: The short name or full Amazon Resource Name (ARN) of the cluster that contains the resource to apply /// - logger: Logger use during operation @inlinable public func putAttributes( @@ -1913,16 +1966,18 @@ public struct ECS: AWSService { return try await self.putAttributes(input, logger: logger) } - /// Modifies the available capacity providers and the default capacity provider strategy for a - /// cluster. You must specify both the available capacity providers and a default capacity provider strategy for - /// the cluster. If the specified cluster has existing capacity providers associated with it, you must - /// specify all existing capacity providers in addition to any new ones you want to add. Any existing - /// capacity providers that are associated with a cluster that are omitted from a PutClusterCapacityProviders API call will be disassociated with the cluster. You can only - /// disassociate an existing capacity provider from a cluster if it's not being used by any existing - /// tasks. 
When creating a service or running a task on a cluster, if no capacity provider or launch type is - /// specified, then the cluster's default capacity provider strategy is used. We recommend that you define - /// a default capacity provider strategy for your cluster. However, you must specify an empty array - /// ([]) to bypass defining a default strategy. + /// Modifies the available capacity providers and the default capacity provider strategy + /// for a cluster. You must specify both the available capacity providers and a default capacity provider + /// strategy for the cluster. If the specified cluster has existing capacity providers + /// associated with it, you must specify all existing capacity providers in addition to any + /// new ones you want to add. Any existing capacity providers that are associated with a + /// cluster that are omitted from a PutClusterCapacityProviders API call will be disassociated with the + /// cluster. You can only disassociate an existing capacity provider from a cluster if it's + /// not being used by any existing tasks. When creating a service or running a task on a cluster, if no capacity provider or + /// launch type is specified, then the cluster's default capacity provider strategy is used. + /// We recommend that you define a default capacity provider strategy for your cluster. + /// However, you must specify an empty array ([]) to bypass defining a default + /// strategy. @Sendable @inlinable public func putClusterCapacityProviders(_ input: PutClusterCapacityProvidersRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> PutClusterCapacityProvidersResponse { @@ -1935,21 +1990,23 @@ public struct ECS: AWSService { logger: logger ) } - /// Modifies the available capacity providers and the default capacity provider strategy for a - /// cluster. You must specify both the available capacity providers and a default capacity provider strategy for - /// the cluster. 
If the specified cluster has existing capacity providers associated with it, you must - /// specify all existing capacity providers in addition to any new ones you want to add. Any existing - /// capacity providers that are associated with a cluster that are omitted from a PutClusterCapacityProviders API call will be disassociated with the cluster. You can only - /// disassociate an existing capacity provider from a cluster if it's not being used by any existing - /// tasks. When creating a service or running a task on a cluster, if no capacity provider or launch type is - /// specified, then the cluster's default capacity provider strategy is used. We recommend that you define - /// a default capacity provider strategy for your cluster. However, you must specify an empty array - /// ([]) to bypass defining a default strategy. + /// Modifies the available capacity providers and the default capacity provider strategy + /// for a cluster. You must specify both the available capacity providers and a default capacity provider + /// strategy for the cluster. If the specified cluster has existing capacity providers + /// associated with it, you must specify all existing capacity providers in addition to any + /// new ones you want to add. Any existing capacity providers that are associated with a + /// cluster that are omitted from a PutClusterCapacityProviders API call will be disassociated with the + /// cluster. You can only disassociate an existing capacity provider from a cluster if it's + /// not being used by any existing tasks. When creating a service or running a task on a cluster, if no capacity provider or + /// launch type is specified, then the cluster's default capacity provider strategy is used. + /// We recommend that you define a default capacity provider strategy for your cluster. + /// However, you must specify an empty array ([]) to bypass defining a default + /// strategy. 
/// /// Parameters: - /// - capacityProviders: The name of one or more capacity providers to associate with the cluster. If specifying a capacity provider that uses an Auto Scaling group, the capacity provider must already - /// - cluster: The short name or full Amazon Resource Name (ARN) of the cluster to modify the capacity provider settings for. If you - /// - defaultCapacityProviderStrategy: The capacity provider strategy to use by default for the cluster. When creating a service or running a task on a cluster, if no capacity provider or launch type is + /// - capacityProviders: The name of one or more capacity providers to associate with the cluster. If specifying a capacity provider that uses an Auto Scaling group, the capacity + /// - cluster: The short name or full Amazon Resource Name (ARN) of the cluster to modify the capacity provider + /// - defaultCapacityProviderStrategy: The capacity provider strategy to use by default for the cluster. When creating a service or running a task on a cluster, if no capacity provider or /// - logger: Logger use during operation @inlinable public func putClusterCapacityProviders( @@ -1966,8 +2023,8 @@ public struct ECS: AWSService { return try await self.putClusterCapacityProviders(input, logger: logger) } - /// This action is only used by the Amazon ECS agent, and it is not intended for use outside of the agent. Registers an EC2 instance into the specified cluster. This instance becomes available to place - /// containers on. + /// This action is only used by the Amazon ECS agent, and it is not intended for use outside of the agent. Registers an EC2 instance into the specified cluster. This instance becomes available + /// to place containers on. 
@Sendable @inlinable public func registerContainerInstance(_ input: RegisterContainerInstanceRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> RegisterContainerInstanceResponse { @@ -1980,19 +2037,19 @@ public struct ECS: AWSService { logger: logger ) } - /// This action is only used by the Amazon ECS agent, and it is not intended for use outside of the agent. Registers an EC2 instance into the specified cluster. This instance becomes available to place - /// containers on. + /// This action is only used by the Amazon ECS agent, and it is not intended for use outside of the agent. Registers an EC2 instance into the specified cluster. This instance becomes available + /// to place containers on. /// /// Parameters: /// - attributes: The container instance attributes that this container instance supports. - /// - cluster: The short name or full Amazon Resource Name (ARN) of the cluster to register your container instance with. + /// - cluster: The short name or full Amazon Resource Name (ARN) of the cluster to register your container instance /// - containerInstanceArn: The ARN of the container instance (if it was previously registered). - /// - instanceIdentityDocument: The instance identity document for the EC2 instance to register. This document can be found by - /// - instanceIdentityDocumentSignature: The instance identity document signature for the EC2 instance to register. This signature can be - /// - platformDevices: The devices that are available on the container instance. The only supported device type is a - /// - tags: The metadata that you apply to the container instance to help you categorize and organize them. Each + /// - instanceIdentityDocument: The instance identity document for the EC2 instance to register. This document can be + /// - instanceIdentityDocumentSignature: The instance identity document signature for the EC2 instance to register. 
This + /// - platformDevices: The devices that are available on the container instance. The only supported device + /// - tags: The metadata that you apply to the container instance to help you categorize and /// - totalResources: The resources available on the instance. - /// - versionInfo: The version information for the Amazon ECS container agent and Docker daemon that runs on the container + /// - versionInfo: The version information for the Amazon ECS container agent and Docker daemon that runs on /// - logger: Logger use during operation @inlinable public func registerContainerInstance( @@ -2022,18 +2079,19 @@ public struct ECS: AWSService { } /// Registers a new task definition from the supplied family and - /// containerDefinitions. Optionally, you can add data volumes to your containers with the - /// volumes parameter. For more information about task definition parameters and defaults, - /// see Amazon ECS Task - /// Definitions in the Amazon Elastic Container Service Developer Guide. You can specify a role for your task with the taskRoleArn parameter. When you specify a - /// role for a task, its containers can then use the latest versions of the CLI or SDKs to make API - /// requests to the Amazon Web Services services that are specified in the policy that's associated with the role. For - /// more information, see IAM Roles for Tasks in the - /// Amazon Elastic Container Service Developer Guide. You can specify a Docker networking mode for the containers in your task definition with the - /// networkMode parameter. If you specify the awsvpc network mode, the task - /// is allocated an elastic network interface, and you must specify a NetworkConfiguration when - /// you create a service or run a task with the task definition. For more information, see Task - /// Networking in the Amazon Elastic Container Service Developer Guide. + /// containerDefinitions. Optionally, you can add data volumes to your + /// containers with the volumes parameter. 
For more information about task + /// definition parameters and defaults, see Amazon ECS Task + /// Definitions in the Amazon Elastic Container Service Developer Guide. You can specify a role for your task with the taskRoleArn parameter. When + /// you specify a role for a task, its containers can then use the latest versions of the + /// CLI or SDKs to make API requests to the Amazon Web Services services that are specified in the + /// policy that's associated with the role. For more information, see IAM + /// Roles for Tasks in the Amazon Elastic Container Service Developer Guide. You can specify a Docker networking mode for the containers in your task definition + /// with the networkMode parameter. If you specify the awsvpc + /// network mode, the task is allocated an elastic network interface, and you must specify a + /// NetworkConfiguration when you create a service or run a task with the task + /// definition. For more information, see Task Networking + /// in the Amazon Elastic Container Service Developer Guide. @Sendable @inlinable public func registerTaskDefinition(_ input: RegisterTaskDefinitionRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> RegisterTaskDefinitionResponse { @@ -2047,42 +2105,45 @@ public struct ECS: AWSService { ) } /// Registers a new task definition from the supplied family and - /// containerDefinitions. Optionally, you can add data volumes to your containers with the - /// volumes parameter. For more information about task definition parameters and defaults, - /// see Amazon ECS Task - /// Definitions in the Amazon Elastic Container Service Developer Guide. You can specify a role for your task with the taskRoleArn parameter. When you specify a - /// role for a task, its containers can then use the latest versions of the CLI or SDKs to make API - /// requests to the Amazon Web Services services that are specified in the policy that's associated with the role. 
For - /// more information, see IAM Roles for Tasks in the - /// Amazon Elastic Container Service Developer Guide. You can specify a Docker networking mode for the containers in your task definition with the - /// networkMode parameter. If you specify the awsvpc network mode, the task - /// is allocated an elastic network interface, and you must specify a NetworkConfiguration when - /// you create a service or run a task with the task definition. For more information, see Task - /// Networking in the Amazon Elastic Container Service Developer Guide. + /// containerDefinitions. Optionally, you can add data volumes to your + /// containers with the volumes parameter. For more information about task + /// definition parameters and defaults, see Amazon ECS Task + /// Definitions in the Amazon Elastic Container Service Developer Guide. You can specify a role for your task with the taskRoleArn parameter. When + /// you specify a role for a task, its containers can then use the latest versions of the + /// CLI or SDKs to make API requests to the Amazon Web Services services that are specified in the + /// policy that's associated with the role. For more information, see IAM + /// Roles for Tasks in the Amazon Elastic Container Service Developer Guide. You can specify a Docker networking mode for the containers in your task definition + /// with the networkMode parameter. If you specify the awsvpc + /// network mode, the task is allocated an elastic network interface, and you must specify a + /// NetworkConfiguration when you create a service or run a task with the task + /// definition. For more information, see Task Networking + /// in the Amazon Elastic Container Service Developer Guide. /// /// Parameters: - /// - containerDefinitions: A list of container definitions in JSON format that describe the different containers that make up - /// - cpu: The number of CPU units used by the task. 
It can be expressed as an integer using CPU units (for - /// - ephemeralStorage: The amount of ephemeral storage to allocate for the task. This parameter is used to expand the total + /// - containerDefinitions: A list of container definitions in JSON format that describe the different containers + /// - cpu: The number of CPU units used by the task. It can be expressed as an integer using CPU + /// - enableFaultInjection: Enables fault injection when you register your task definition and allows for fault injection requests + /// - ephemeralStorage: The amount of ephemeral storage to allocate for the task. This parameter is used to /// - executionRoleArn: The Amazon Resource Name (ARN) of the task execution role that grants the Amazon ECS container agent permission to make Amazon Web Services API calls on your behalf. For informationabout the required IAM roles for Amazon ECS, see IAM roles for Amazon ECS in the Amazon Elastic Container Service Developer Guide. - /// - family: You must specify a family for a task definition. You can use it track multiple versions + /// - family: You must specify a family for a task definition. You can use it to track /// - inferenceAccelerators: The Elastic Inference accelerators to use for the containers in the task. /// - ipcMode: The IPC resource namespace to use for the containers in the task. The valid values are host, task, or none. If host is specified, then all containers within the tasks that specified the host IPC mode on the same container instance share the same IPC resources with the host Amazon EC2 instance. If task is specified, all containers within the specified task share the same IPC resources. If none is specified, then IPC resources within the containers of a task are private and not shared with other containers in a task or on the container instance. If no value is specified, then the IPC resource namespace sharing depends on the Docker daemon setting on the container instance.
If the host IPC mode is used, be aware that there is a heightened risk of undesired IPC namespace expose. If you are setting namespaced kernel parameters using systemControls for the containers in the task, the following will apply to your IPC resource namespace. For more information, see System Controls in the Amazon Elastic Container Service Developer Guide. For tasks that use the host IPC mode, IPC namespace related systemControls are not supported. For tasks that use the task IPC mode, IPC namespace related systemControls will apply to all containers within a task. This parameter is not supported for Windows containers or tasks run on Fargate. - /// - memory: The amount of memory (in MiB) used by the task. It can be expressed as an integer using MiB (for + /// - memory: The amount of memory (in MiB) used by the task. It can be expressed as an integer /// - networkMode: The Docker networking mode to use for the containers in the task. The valid values are none, bridge, awsvpc, and host. If no network mode is specified, the default is bridge. For Amazon ECS tasks on Fargate, the awsvpc network mode is required. For Amazon ECS tasks on Amazon EC2 Linux instances, any network mode can be used. For Amazon ECS tasks on Amazon EC2 Windows instances, or awsvpc can be used. If the network mode is set to none, you cannot specify port mappings in your container definitions, and the tasks containers do not have external connectivity. The host and awsvpc network modes offer the highest networking performance for containers because they use the EC2 network stack instead of the virtualized network stack provided by the bridge mode. With the host and awsvpc network modes, exposed container ports are mapped directly to the corresponding host port (for the host network mode) or the attached elastic network interface port (for the awsvpc network mode), so you cannot take advantage of dynamic host port mappings. 
When using the host network mode, you should not run containers using the root user (UID 0). It is considered best practice to use a non-root user. If the network mode is awsvpc, the task is allocated an elastic network interface, and you must specify a NetworkConfiguration value when you create a service or run a task with the task definition. For more information, see Task Networking in the Amazon Elastic Container Service Developer Guide. If the network mode is host, you cannot run multiple instantiations of the same task on a single container instance when port mappings are used. /// - pidMode: The process namespace to use for the containers in the task. The valid values are host or task. On Fargate for Linux containers, the only valid value is task. For example, monitoring sidecars might need pidMode to access information about other containers running in the same task. If host is specified, all containers within the tasks that specified the host PID mode on the same container instance share the same process namespace with the host Amazon EC2 instance. If task is specified, all containers within the specified task share the same process namespace. If no value is specified, the default is a private namespace for each container. If the host PID mode is used, there's a heightened risk of undesired process namespace exposure. This parameter is not supported for Windows containers. This parameter is only supported for tasks that are hosted on Fargate if the tasks are using platform version 1.4.0 or later (Linux). This isn't supported for Windows containers on Fargate. - /// - placementConstraints: An array of placement constraint objects to use for the task. You can specify a maximum of 10 + /// - placementConstraints: An array of placement constraint objects to use for the task. You can specify a /// - proxyConfiguration: The configuration details for the App Mesh proxy. 
For tasks hosted on Amazon EC2 instances, the container instances require at least version - /// - requiresCompatibilities: The task launch type that Amazon ECS validates the task definition against. A client exception is returned - /// - runtimePlatform: The operating system that your tasks definitions run on. A platform family is specified only for - /// - tags: The metadata that you apply to the task definition to help you categorize and organize them. Each tag - /// - taskRoleArn: The short name or full Amazon Resource Name (ARN) of the IAM role that containers in this task can assume. All - /// - volumes: A list of volume definitions in JSON format that containers in your task might use. + /// - requiresCompatibilities: The task launch type that Amazon ECS validates the task definition against. A client + /// - runtimePlatform: The operating system that your tasks definitions run on. A platform family is + /// - tags: The metadata that you apply to the task definition to help you categorize and organize + /// - taskRoleArn: The short name or full Amazon Resource Name (ARN) of the IAM role that containers in this task can + /// - volumes: A list of volume definitions in JSON format that containers in your task might /// - logger: Logger use during operation @inlinable public func registerTaskDefinition( containerDefinitions: [ContainerDefinition], cpu: String? = nil, + enableFaultInjection: Bool? = nil, ephemeralStorage: EphemeralStorage? = nil, executionRoleArn: String? = nil, family: String, @@ -2103,6 +2164,7 @@ public struct ECS: AWSService { let input = RegisterTaskDefinitionRequest( containerDefinitions: containerDefinitions, cpu: cpu, + enableFaultInjection: enableFaultInjection, ephemeralStorage: ephemeralStorage, executionRoleArn: executionRoleArn, family: family, @@ -2122,21 +2184,23 @@ public struct ECS: AWSService { return try await self.registerTaskDefinition(input, logger: logger) } - /// Starts a new task using the specified task definition. 
On March 21, 2024, a change was made to resolve the task definition revision before authorization. When a task definition revision is not specified, authorization will occur using the latest revision of a task definition. Amazon Elastic Inference (EI) is no longer available to customers. You can allow Amazon ECS to place tasks for you, or you can customize how Amazon ECS places tasks using - /// placement constraints and placement strategies. For more information, see Scheduling - /// Tasks in the Amazon Elastic Container Service Developer Guide. Alternatively, you can use StartTask to use your own scheduler or place tasks manually - /// on specific container instances. You can attach Amazon EBS volumes to Amazon ECS tasks by configuring the volume when creating or updating a - /// service. For more infomation, see Amazon EBS - /// volumes in the Amazon Elastic Container Service Developer Guide. The Amazon ECS API follows an eventual consistency model. This is because of the distributed nature of the - /// system supporting the API. This means that the result of an API command you run that affects your Amazon ECS - /// resources might not be immediately visible to all subsequent commands you run. Keep this in mind when - /// you carry out an API command that immediately follows a previous API command. To manage eventual consistency, you can do the following: Confirm the state of the resource before you run a command to modify it. Run the - /// DescribeTasks command using an exponential backoff algorithm to ensure that you allow enough - /// time for the previous command to propagate through the system. To do this, run the - /// DescribeTasks command repeatedly, starting with a couple of seconds of wait time and increasing - /// gradually up to five minutes of wait time. Add wait time between subsequent commands, even if the DescribeTasks command returns an - /// accurate response. 
Apply an exponential backoff algorithm starting with a couple of seconds of - /// wait time, and increase gradually up to about five minutes of wait time. + /// Starts a new task using the specified task definition. On March 21, 2024, a change was made to resolve the task definition revision before authorization. When a task definition revision is not specified, authorization will occur using the latest revision of a task definition. Amazon Elastic Inference (EI) is no longer available to customers. You can allow Amazon ECS to place tasks for you, or you can customize how Amazon ECS places + /// tasks using placement constraints and placement strategies. For more information, see + /// Scheduling Tasks in the Amazon Elastic Container Service Developer Guide. Alternatively, you can use StartTask to use your own scheduler or place + /// tasks manually on specific container instances. You can attach Amazon EBS volumes to Amazon ECS tasks by configuring the volume when creating or + /// updating a service. For more information, see Amazon EBS volumes in the Amazon Elastic Container Service Developer Guide. The Amazon ECS API follows an eventual consistency model. This is because of the + /// distributed nature of the system supporting the API. This means that the result of an + /// API command you run that affects your Amazon ECS resources might not be immediately visible + /// to all subsequent commands you run. Keep this in mind when you carry out an API command + /// that immediately follows a previous API command. To manage eventual consistency, you can do the following: Confirm the state of the resource before you run a command to modify it. Run + /// the DescribeTasks command using an exponential backoff algorithm to ensure that + /// you allow enough time for the previous command to propagate through the system.
+ /// To do this, run the DescribeTasks command repeatedly, starting with a couple of + /// seconds of wait time and increasing gradually up to five minutes of wait + /// time. Add wait time between subsequent commands, even if the DescribeTasks command + /// returns an accurate response. Apply an exponential backoff algorithm starting + /// with a couple of seconds of wait time, and increase gradually up to about five + /// minutes of wait time. @Sendable @inlinable public func runTask(_ input: RunTaskRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> RunTaskResponse { @@ -2149,42 +2213,44 @@ public struct ECS: AWSService { logger: logger ) } - /// Starts a new task using the specified task definition. On March 21, 2024, a change was made to resolve the task definition revision before authorization. When a task definition revision is not specified, authorization will occur using the latest revision of a task definition. Amazon Elastic Inference (EI) is no longer available to customers. You can allow Amazon ECS to place tasks for you, or you can customize how Amazon ECS places tasks using - /// placement constraints and placement strategies. For more information, see Scheduling - /// Tasks in the Amazon Elastic Container Service Developer Guide. Alternatively, you can use StartTask to use your own scheduler or place tasks manually - /// on specific container instances. You can attach Amazon EBS volumes to Amazon ECS tasks by configuring the volume when creating or updating a - /// service. For more infomation, see Amazon EBS - /// volumes in the Amazon Elastic Container Service Developer Guide. The Amazon ECS API follows an eventual consistency model. This is because of the distributed nature of the - /// system supporting the API. This means that the result of an API command you run that affects your Amazon ECS - /// resources might not be immediately visible to all subsequent commands you run. 
Keep this in mind when - /// you carry out an API command that immediately follows a previous API command. To manage eventual consistency, you can do the following: Confirm the state of the resource before you run a command to modify it. Run the - /// DescribeTasks command using an exponential backoff algorithm to ensure that you allow enough - /// time for the previous command to propagate through the system. To do this, run the - /// DescribeTasks command repeatedly, starting with a couple of seconds of wait time and increasing - /// gradually up to five minutes of wait time. Add wait time between subsequent commands, even if the DescribeTasks command returns an - /// accurate response. Apply an exponential backoff algorithm starting with a couple of seconds of - /// wait time, and increase gradually up to about five minutes of wait time. + /// Starts a new task using the specified task definition. On March 21, 2024, a change was made to resolve the task definition revision before authorization. When a task definition revision is not specified, authorization will occur using the latest revision of a task definition. Amazon Elastic Inference (EI) is no longer available to customers. You can allow Amazon ECS to place tasks for you, or you can customize how Amazon ECS places + /// tasks using placement constraints and placement strategies. For more information, see + /// Scheduling Tasks in the Amazon Elastic Container Service Developer Guide. Alternatively, you can use StartTask to use your own scheduler or place + /// tasks manually on specific container instances. You can attach Amazon EBS volumes to Amazon ECS tasks by configuring the volume when creating or + /// updating a service. For more information, see Amazon EBS volumes in the Amazon Elastic Container Service Developer Guide. The Amazon ECS API follows an eventual consistency model. This is because of the + /// distributed nature of the system supporting the API.
This means that the result of an + /// API command you run that affects your Amazon ECS resources might not be immediately visible + /// to all subsequent commands you run. Keep this in mind when you carry out an API command + /// that immediately follows a previous API command. To manage eventual consistency, you can do the following: Confirm the state of the resource before you run a command to modify it. Run + /// the DescribeTasks command using an exponential backoff algorithm to ensure that + /// you allow enough time for the previous command to propagate through the system. + /// To do this, run the DescribeTasks command repeatedly, starting with a couple of + /// seconds of wait time and increasing gradually up to five minutes of wait + /// time. Add wait time between subsequent commands, even if the DescribeTasks command + /// returns an accurate response. Apply an exponential backoff algorithm starting + /// with a couple of seconds of wait time, and increase gradually up to about five + /// minutes of wait time. /// /// Parameters: - /// - capacityProviderStrategy: The capacity provider strategy to use for the task. If a capacityProviderStrategy is specified, the launchType parameter must - /// - clientToken: An identifier that you provide to ensure the idempotency of the request. It must be unique and is - /// - cluster: The short name or full Amazon Resource Name (ARN) of the cluster to run your task on. If you do not specify a cluster, the default cluster is assumed. - /// - count: The number of instantiations of the specified task to place on your cluster. You can specify up to 10 - /// - enableECSManagedTags: Specifies whether to use Amazon ECS managed tags for the task. For more information, see Tagging Your Amazon ECS - /// - enableExecuteCommand: Determines whether to use the execute command functionality for the containers in this task. If - /// - group: The name of the task group to associate with the task. 
The default value is the family name of the - /// - launchType: The infrastructure to run your standalone task on. For more information, see Amazon ECS launch - /// - networkConfiguration: The network configuration for the task. This parameter is required for task definitions that use the - /// - overrides: A list of container overrides in JSON format that specify the name of a container in the specified - /// - placementConstraints: An array of placement constraint objects to use for the task. You can specify up to 10 constraints - /// - placementStrategy: The placement strategy objects to use for the task. You can specify a maximum of 5 strategy rules for - /// - platformVersion: The platform version the task uses. A platform version is only specified for tasks hosted on - /// - propagateTags: Specifies whether to propagate the tags from the task definition to the task. If no value is + /// - capacityProviderStrategy: The capacity provider strategy to use for the task. If a capacityProviderStrategy is specified, the launchType + /// - clientToken: An identifier that you provide to ensure the idempotency of the request. It must be + /// - cluster: The short name or full Amazon Resource Name (ARN) of the cluster to run your task on. + /// - count: The number of instantiations of the specified task to place on your cluster. You can + /// - enableECSManagedTags: Specifies whether to use Amazon ECS managed tags for the task. For more information, see + /// - enableExecuteCommand: Determines whether to use the execute command functionality for the containers in this + /// - group: The name of the task group to associate with the task. The default value is the family + /// - launchType: The infrastructure to run your standalone task on. For more information, see Amazon ECS + /// - networkConfiguration: The network configuration for the task. 
This parameter is required for task + /// - overrides: A list of container overrides in JSON format that specify the name of a container in + /// - placementConstraints: An array of placement constraint objects to use for the task. You can specify up to 10 + /// - placementStrategy: The placement strategy objects to use for the task. You can specify a maximum of 5 + /// - platformVersion: The platform version the task uses. A platform version is only specified for tasks + /// - propagateTags: Specifies whether to propagate the tags from the task definition to the task. If no /// - referenceId: This parameter is only used by Amazon ECS. It is not intended for use by customers. - /// - startedBy: An optional tag specified when a task is started. For example, if you automatically trigger a task to - /// - tags: The metadata that you apply to the task to help you categorize and organize them. Each tag consists - /// - taskDefinition: The family and revision (family:revision) or full ARN of the - /// - volumeConfigurations: The details of the volume that was configuredAtLaunch. You can configure the size, + /// - startedBy: An optional tag specified when a task is started. For example, if you automatically + /// - tags: The metadata that you apply to the task to help you categorize and organize them. Each + /// - taskDefinition: The family and revision (family:revision) or + /// - volumeConfigurations: The details of the volume that was configuredAtLaunch. You can configure /// - logger: Logger use during operation @inlinable public func runTask( @@ -2233,12 +2299,10 @@ public struct ECS: AWSService { return try await self.runTask(input, logger: logger) } - /// Starts a new task from the specified task definition on the specified container instance or - /// instances. On March 21, 2024, a change was made to resolve the task definition revision before authorization. 
When a task definition revision is not specified, authorization will occur using the latest revision of a task definition. Amazon Elastic Inference (EI) is no longer available to customers. Alternatively, you can useRunTask to place tasks for you. For more information, see - /// Scheduling - /// Tasks in the Amazon Elastic Container Service Developer Guide. You can attach Amazon EBS volumes to Amazon ECS tasks by configuring the volume when creating or updating a - /// service. For more infomation, see Amazon EBS - /// volumes in the Amazon Elastic Container Service Developer Guide. + /// Starts a new task from the specified task definition on the specified container + /// instance or instances. On March 21, 2024, a change was made to resolve the task definition revision before authorization. When a task definition revision is not specified, authorization will occur using the latest revision of a task definition. Amazon Elastic Inference (EI) is no longer available to customers. Alternatively, you can use RunTask to place tasks for you. For more + /// information, see Scheduling Tasks in the Amazon Elastic Container Service Developer Guide. You can attach Amazon EBS volumes to Amazon ECS tasks by configuring the volume when creating or + /// updating a service. For more information, see Amazon EBS volumes in the Amazon Elastic Container Service Developer Guide. @Sendable @inlinable public func startTask(_ input: StartTaskRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> StartTaskResponse { @@ -2251,27 +2315,25 @@ public struct ECS: AWSService { logger: logger ) } - /// Starts a new task from the specified task definition on the specified container instance or - /// instances. On March 21, 2024, a change was made to resolve the task definition revision before authorization. When a task definition revision is not specified, authorization will occur using the latest revision of a task definition.
Amazon Elastic Inference (EI) is no longer available to customers. Alternatively, you can useRunTask to place tasks for you. For more information, see - /// Scheduling - /// Tasks in the Amazon Elastic Container Service Developer Guide. You can attach Amazon EBS volumes to Amazon ECS tasks by configuring the volume when creating or updating a - /// service. For more infomation, see Amazon EBS - /// volumes in the Amazon Elastic Container Service Developer Guide. + /// Starts a new task from the specified task definition on the specified container + /// instance or instances. On March 21, 2024, a change was made to resolve the task definition revision before authorization. When a task definition revision is not specified, authorization will occur using the latest revision of a task definition. Amazon Elastic Inference (EI) is no longer available to customers. Alternatively, you can useRunTask to place tasks for you. For more + /// information, see Scheduling Tasks in the Amazon Elastic Container Service Developer Guide. You can attach Amazon EBS volumes to Amazon ECS tasks by configuring the volume when creating or + /// updating a service. For more infomation, see Amazon EBS volumes in the Amazon Elastic Container Service Developer Guide. /// /// Parameters: /// - cluster: The short name or full Amazon Resource Name (ARN) of the cluster where to start your task. - /// - containerInstances: The container instance IDs or full ARN entries for the container instances where you would like to - /// - enableECSManagedTags: Specifies whether to use Amazon ECS managed tags for the task. For more information, see Tagging Your Amazon ECS - /// - enableExecuteCommand: Whether or not the execute command functionality is turned on for the task. If true, - /// - group: The name of the task group to associate with the task. 
The default value is the family name of the + /// - containerInstances: The container instance IDs or full ARN entries for the container instances where you + /// - enableECSManagedTags: Specifies whether to use Amazon ECS managed tags for the task. For more information, see + /// - enableExecuteCommand: Whether or not the execute command functionality is turned on for the task. If + /// - group: The name of the task group to associate with the task. The default value is the family /// - networkConfiguration: The VPC subnet and security group configuration for tasks that receive their own elastic network interface by using the awsvpc networking mode. - /// - overrides: A list of container overrides in JSON format that specify the name of a container in the specified - /// - propagateTags: Specifies whether to propagate the tags from the task definition or the service to the task. If no + /// - overrides: A list of container overrides in JSON format that specify the name of a container in + /// - propagateTags: Specifies whether to propagate the tags from the task definition or the service to the /// - referenceId: This parameter is only used by Amazon ECS. It is not intended for use by customers. - /// - startedBy: An optional tag specified when a task is started. For example, if you automatically trigger - /// - tags: The metadata that you apply to the task to help you categorize and organize them. Each tag consists - /// - taskDefinition: The family and revision (family:revision) or full ARN of the - /// - volumeConfigurations: The details of the volume that was configuredAtLaunch. You can configure the size, + /// - startedBy: An optional tag specified when a task is started. For example, if you automatically + /// - tags: The metadata that you apply to the task to help you categorize and organize them. Each + /// - taskDefinition: The family and revision (family:revision) or + /// - volumeConfigurations: The details of the volume that was configuredAtLaunch. 
You can configure /// - logger: Logger use during operation @inlinable public func startTask( @@ -2308,15 +2370,17 @@ public struct ECS: AWSService { return try await self.startTask(input, logger: logger) } - /// Stops a running task. Any tags associated with the task will be deleted. When you call StopTask on a task, the equivalent of docker stop is issued - /// to the containers running in the task. This results in a SIGTERM value and a default - /// 30-second timeout, after which the SIGKILL value is sent and the containers are forcibly - /// stopped. If the container handles the SIGTERM value gracefully and exits within 30 seconds - /// from receiving it, no SIGKILL value is sent. For Windows containers, POSIX signals do not work and runtime stops the container by sending a - /// CTRL_SHUTDOWN_EVENT. For more information, see Unable to react to graceful shutdown of (Windows) - /// container #25982 on GitHub. The default 30-second timeout can be configured on the Amazon ECS container agent with the - /// ECS_CONTAINER_STOP_TIMEOUT variable. For more information, see Amazon ECS - /// Container Agent Configuration in the Amazon Elastic Container Service Developer Guide. + /// Stops a running task. Any tags associated with the task will be deleted. When you call StopTask on a task, the equivalent of docker + /// stop is issued to the containers running in the task. This results in a + /// SIGTERM value and a default 30-second timeout, after which the + /// SIGKILL value is sent and the containers are forcibly stopped. If the + /// container handles the SIGTERM value gracefully and exits within 30 seconds + /// from receiving it, no SIGKILL value is sent. For Windows containers, POSIX signals do not work and runtime stops the container by + /// sending a CTRL_SHUTDOWN_EVENT. For more information, see Unable to react to graceful shutdown + /// of (Windows) container #25982 on GitHub. 
The default 30-second timeout can be configured on the Amazon ECS container agent with + /// the ECS_CONTAINER_STOP_TIMEOUT variable. For more information, see + /// Amazon ECS Container Agent Configuration in the + /// Amazon Elastic Container Service Developer Guide. @Sendable @inlinable public func stopTask(_ input: StopTaskRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> StopTaskResponse { @@ -2329,19 +2393,21 @@ public struct ECS: AWSService { logger: logger ) } - /// Stops a running task. Any tags associated with the task will be deleted. When you call StopTask on a task, the equivalent of docker stop is issued - /// to the containers running in the task. This results in a SIGTERM value and a default - /// 30-second timeout, after which the SIGKILL value is sent and the containers are forcibly - /// stopped. If the container handles the SIGTERM value gracefully and exits within 30 seconds - /// from receiving it, no SIGKILL value is sent. For Windows containers, POSIX signals do not work and runtime stops the container by sending a - /// CTRL_SHUTDOWN_EVENT. For more information, see Unable to react to graceful shutdown of (Windows) - /// container #25982 on GitHub. The default 30-second timeout can be configured on the Amazon ECS container agent with the - /// ECS_CONTAINER_STOP_TIMEOUT variable. For more information, see Amazon ECS - /// Container Agent Configuration in the Amazon Elastic Container Service Developer Guide. + /// Stops a running task. Any tags associated with the task will be deleted. When you call StopTask on a task, the equivalent of docker + /// stop is issued to the containers running in the task. This results in a + /// SIGTERM value and a default 30-second timeout, after which the + /// SIGKILL value is sent and the containers are forcibly stopped. If the + /// container handles the SIGTERM value gracefully and exits within 30 seconds + /// from receiving it, no SIGKILL value is sent. 
For Windows containers, POSIX signals do not work and runtime stops the container by + /// sending a CTRL_SHUTDOWN_EVENT. For more information, see Unable to react to graceful shutdown + /// of (Windows) container #25982 on GitHub. The default 30-second timeout can be configured on the Amazon ECS container agent with + /// the ECS_CONTAINER_STOP_TIMEOUT variable. For more information, see + /// Amazon ECS Container Agent Configuration in the + /// Amazon Elastic Container Service Developer Guide. /// /// Parameters: /// - cluster: The short name or full Amazon Resource Name (ARN) of the cluster that hosts the task to stop. - /// - reason: An optional message specified when a task is stopped. For example, if you're using a custom + /// - reason: An optional message specified when a task is stopped. For example, if you're using a /// - task: The task ID of the task to stop. /// - logger: Logger use during operation @inlinable @@ -2376,7 +2442,7 @@ public struct ECS: AWSService { /// /// Parameters: /// - attachments: Any attachments associated with the state change request. - /// - cluster: The short name or full ARN of the cluster that hosts the container instance the attachment belongs + /// - cluster: The short name or full ARN of the cluster that hosts the container instance the /// - logger: Logger use during operation @inlinable public func submitAttachmentStateChanges( @@ -2497,9 +2563,10 @@ public struct ECS: AWSService { return try await self.submitTaskStateChange(input, logger: logger) } - /// Associates the specified tags to a resource with the specified resourceArn. If existing - /// tags on a resource aren't specified in the request parameters, they aren't changed. When a resource is - /// deleted, the tags that are associated with that resource are deleted as well. + /// Associates the specified tags to a resource with the specified + /// resourceArn. If existing tags on a resource aren't specified in the + /// request parameters, they aren't changed. 
When a resource is deleted, the tags that are + /// associated with that resource are deleted as well. @Sendable @inlinable public func tagResource(_ input: TagResourceRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> TagResourceResponse { @@ -2512,12 +2579,13 @@ public struct ECS: AWSService { logger: logger ) } - /// Associates the specified tags to a resource with the specified resourceArn. If existing - /// tags on a resource aren't specified in the request parameters, they aren't changed. When a resource is - /// deleted, the tags that are associated with that resource are deleted as well. + /// Associates the specified tags to a resource with the specified + /// resourceArn. If existing tags on a resource aren't specified in the + /// request parameters, they aren't changed. When a resource is deleted, the tags that are + /// associated with that resource are deleted as well. /// /// Parameters: - /// - resourceArn: The Amazon Resource Name (ARN) of the resource to add tags to. Currently, the supported resources are Amazon ECS capacity + /// - resourceArn: The Amazon Resource Name (ARN) of the resource to add tags to. Currently, the supported resources are /// - tags: The tags to add to the resource. A tag is an array of key-value pairs. The following basic restrictions apply to tags: Maximum number of tags per resource - 50 For each resource, each tag key must be unique, and each tag key can have only one value. Maximum key length - 128 Unicode characters in UTF-8 Maximum value length - 256 Unicode characters in UTF-8 If your tagging schema is used across multiple services and resources, remember that other services may have restrictions on allowed characters. Generally allowed characters are: letters, numbers, and spaces representable in UTF-8, and the following characters: + - = . _ : / @. Tag keys and values are case-sensitive. 
Do not use aws:, AWS:, or any upper or lowercase combination of such as a prefix for either keys or values as it is reserved for Amazon Web Services use. You cannot edit or delete tag keys or values with this prefix. Tags with this prefix do not count against your tags per resource limit. /// - logger: Logger use during operation @inlinable @@ -2549,7 +2617,7 @@ public struct ECS: AWSService { /// Deletes specified tags from a resource. /// /// Parameters: - /// - resourceArn: The Amazon Resource Name (ARN) of the resource to delete tags from. Currently, the supported resources are Amazon ECS + /// - resourceArn: The Amazon Resource Name (ARN) of the resource to delete tags from. Currently, the supported resources /// - tagKeys: The keys of the tags to be removed. /// - logger: Logger use during operation @inlinable @@ -2652,7 +2720,7 @@ public struct ECS: AWSService { /// /// Parameters: /// - cluster: The name of the cluster to modify the settings for. - /// - settings: The setting to use by default for a cluster. This parameter is used to turn on CloudWatch Container + /// - settings: The setting to use by default for a cluster. This parameter is used to turn on CloudWatch /// - logger: Logger use during operation @inlinable public func updateClusterSettings( @@ -2667,18 +2735,18 @@ public struct ECS: AWSService { return try await self.updateClusterSettings(input, logger: logger) } - /// Updates the Amazon ECS container agent on a specified container instance. Updating the Amazon ECS container - /// agent doesn't interrupt running tasks or services on the container instance. The process for updating - /// the agent differs depending on whether your container instance was launched with the Amazon ECS-optimized - /// AMI or another operating system. The UpdateContainerAgent API isn't supported for container instances using the - /// Amazon ECS-optimized Amazon Linux 2 (arm64) AMI. To update the container agent, you can update the - /// ecs-init package. 
This updates the agent. For more information, see Updating - /// the Amazon ECS container agent in the Amazon Elastic Container Service Developer Guide. Agent updates with the UpdateContainerAgent API operation do not apply to Windows - /// container instances. We recommend that you launch new container instances to update the agent - /// version in your Windows clusters. The UpdateContainerAgent API requires an Amazon ECS-optimized AMI or Amazon Linux AMI with - /// the ecs-init service installed and running. For help updating the Amazon ECS container agent on - /// other operating systems, see Manually updating - /// the Amazon ECS container agent in the Amazon Elastic Container Service Developer Guide. + /// Updates the Amazon ECS container agent on a specified container instance. Updating the + /// Amazon ECS container agent doesn't interrupt running tasks or services on the container + /// instance. The process for updating the agent differs depending on whether your container + /// instance was launched with the Amazon ECS-optimized AMI or another operating system. The UpdateContainerAgent API isn't supported for container instances + /// using the Amazon ECS-optimized Amazon Linux 2 (arm64) AMI. To update the container agent, + /// you can update the ecs-init package. This updates the agent. For more + /// information, see Updating the + /// Amazon ECS container agent in the Amazon Elastic Container Service Developer Guide. Agent updates with the UpdateContainerAgent API operation do not + /// apply to Windows container instances. We recommend that you launch new container + /// instances to update the agent version in your Windows clusters. The UpdateContainerAgent API requires an Amazon ECS-optimized AMI or Amazon + /// Linux AMI with the ecs-init service installed and running. 
For help + /// updating the Amazon ECS container agent on other operating systems, see Manually updating the Amazon ECS container agent in the Amazon Elastic Container Service Developer Guide. @Sendable @inlinable public func updateContainerAgent(_ input: UpdateContainerAgentRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> UpdateContainerAgentResponse { @@ -2691,22 +2759,22 @@ public struct ECS: AWSService { logger: logger ) } - /// Updates the Amazon ECS container agent on a specified container instance. Updating the Amazon ECS container - /// agent doesn't interrupt running tasks or services on the container instance. The process for updating - /// the agent differs depending on whether your container instance was launched with the Amazon ECS-optimized - /// AMI or another operating system. The UpdateContainerAgent API isn't supported for container instances using the - /// Amazon ECS-optimized Amazon Linux 2 (arm64) AMI. To update the container agent, you can update the - /// ecs-init package. This updates the agent. For more information, see Updating - /// the Amazon ECS container agent in the Amazon Elastic Container Service Developer Guide. Agent updates with the UpdateContainerAgent API operation do not apply to Windows - /// container instances. We recommend that you launch new container instances to update the agent - /// version in your Windows clusters. The UpdateContainerAgent API requires an Amazon ECS-optimized AMI or Amazon Linux AMI with - /// the ecs-init service installed and running. For help updating the Amazon ECS container agent on - /// other operating systems, see Manually updating - /// the Amazon ECS container agent in the Amazon Elastic Container Service Developer Guide. + /// Updates the Amazon ECS container agent on a specified container instance. Updating the + /// Amazon ECS container agent doesn't interrupt running tasks or services on the container + /// instance. 
The process for updating the agent differs depending on whether your container + /// instance was launched with the Amazon ECS-optimized AMI or another operating system. The UpdateContainerAgent API isn't supported for container instances + /// using the Amazon ECS-optimized Amazon Linux 2 (arm64) AMI. To update the container agent, + /// you can update the ecs-init package. This updates the agent. For more + /// information, see Updating the + /// Amazon ECS container agent in the Amazon Elastic Container Service Developer Guide. Agent updates with the UpdateContainerAgent API operation do not + /// apply to Windows container instances. We recommend that you launch new container + /// instances to update the agent version in your Windows clusters. The UpdateContainerAgent API requires an Amazon ECS-optimized AMI or Amazon + /// Linux AMI with the ecs-init service installed and running. For help + /// updating the Amazon ECS container agent on other operating systems, see Manually updating the Amazon ECS container agent in the Amazon Elastic Container Service Developer Guide. /// /// Parameters: - /// - cluster: The short name or full Amazon Resource Name (ARN) of the cluster that your container instance is running on. - /// - containerInstance: The container instance ID or full ARN entries for the container instance where you would like to + /// - cluster: The short name or full Amazon Resource Name (ARN) of the cluster that your container instance is + /// - containerInstance: The container instance ID or full ARN entries for the container instance where you /// - logger: Logger use during operation @inlinable public func updateContainerAgent( @@ -2721,34 +2789,38 @@ public struct ECS: AWSService { return try await self.updateContainerAgent(input, logger: logger) } - /// Modifies the status of an Amazon ECS container instance. 
Once a container instance has reached an ACTIVE state, you can change the status of a - /// container instance to DRAINING to manually remove an instance from a cluster, for example - /// to perform system updates, update the Docker daemon, or scale down the cluster size. A container instance can't be changed to DRAINING until it has reached an - /// ACTIVE status. If the instance is in any other status, an error will be - /// received. When you set a container instance to DRAINING, Amazon ECS prevents new tasks from being - /// scheduled for placement on the container instance and replacement service tasks are started on other - /// container instances in the cluster if the resources are available. Service tasks on the container - /// instance that are in the PENDING state are stopped immediately. Service tasks on the container instance that are in the RUNNING state are stopped and - /// replaced according to the service's deployment configuration parameters, - /// minimumHealthyPercent and maximumPercent. You can change the deployment - /// configuration of your service using UpdateService. If minimumHealthyPercent is below 100%, the scheduler can ignore + /// Modifies the status of an Amazon ECS container instance. Once a container instance has reached an ACTIVE state, you can change the + /// status of a container instance to DRAINING to manually remove an instance + /// from a cluster, for example to perform system updates, update the Docker daemon, or + /// scale down the cluster size. A container instance can't be changed to DRAINING until it has + /// reached an ACTIVE status. If the instance is in any other status, an + /// error will be received. When you set a container instance to DRAINING, Amazon ECS prevents new tasks + /// from being scheduled for placement on the container instance and replacement service + /// tasks are started on other container instances in the cluster if the resources are + /// available. 
Service tasks on the container instance that are in the PENDING + /// state are stopped immediately. Service tasks on the container instance that are in the RUNNING state are + /// stopped and replaced according to the service's deployment configuration parameters, + /// minimumHealthyPercent and maximumPercent. You can change + /// the deployment configuration of your service using UpdateService. If minimumHealthyPercent is below 100%, the scheduler can ignore /// desiredCount temporarily during task replacement. For example, - /// desiredCount is four tasks, a minimum of 50% allows the scheduler to stop two - /// existing tasks before starting two new tasks. If the minimum is 100%, the service scheduler - /// can't remove existing tasks until the replacement tasks are considered healthy. Tasks for - /// services that do not use a load balancer are considered healthy if they're in the - /// RUNNING state. Tasks for services that use a load balancer are considered - /// healthy if they're in the RUNNING state and are reported as healthy by the load - /// balancer. The maximumPercent parameter represents an upper limit on the number of running - /// tasks during task replacement. You can use this to define the replacement batch size. For - /// example, if desiredCount is four tasks, a maximum of 200% starts four new tasks - /// before stopping the four tasks to be drained, provided that the cluster resources required to - /// do this are available. If the maximum is 100%, then replacement tasks can't start until the - /// draining tasks have stopped. Any PENDING or RUNNING tasks that do not belong to a service aren't - /// affected. You must wait for them to finish or stop them manually. A container instance has completed draining when it has no more RUNNING tasks. You can - /// verify this using ListTasks. 
When a container instance has been drained, you can set a container instance to ACTIVE - /// status and once it has reached that status the Amazon ECS scheduler can begin scheduling tasks on the - /// instance again. + /// desiredCount is four tasks, a minimum of 50% allows the + /// scheduler to stop two existing tasks before starting two new tasks. If the + /// minimum is 100%, the service scheduler can't remove existing tasks until the + /// replacement tasks are considered healthy. Tasks for services that do not use a + /// load balancer are considered healthy if they're in the RUNNING + /// state. Tasks for services that use a load balancer are considered healthy if + /// they're in the RUNNING state and are reported as healthy by the + /// load balancer. The maximumPercent parameter represents an upper limit on the + /// number of running tasks during task replacement. You can use this to define the + /// replacement batch size. For example, if desiredCount is four tasks, + /// a maximum of 200% starts four new tasks before stopping the four tasks to be + /// drained, provided that the cluster resources required to do this are available. + /// If the maximum is 100%, then replacement tasks can't start until the draining + /// tasks have stopped. Any PENDING or RUNNING tasks that do not belong to a service + /// aren't affected. You must wait for them to finish or stop them manually. A container instance has completed draining when it has no more RUNNING + /// tasks. You can verify this using ListTasks. When a container instance has been drained, you can set a container instance to + /// ACTIVE status and once it has reached that status the Amazon ECS scheduler + /// can begin scheduling tasks on the instance again. 
@Sendable @inlinable public func updateContainerInstancesState(_ input: UpdateContainerInstancesStateRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> UpdateContainerInstancesStateResponse { @@ -2761,39 +2833,43 @@ public struct ECS: AWSService { logger: logger ) } - /// Modifies the status of an Amazon ECS container instance. Once a container instance has reached an ACTIVE state, you can change the status of a - /// container instance to DRAINING to manually remove an instance from a cluster, for example - /// to perform system updates, update the Docker daemon, or scale down the cluster size. A container instance can't be changed to DRAINING until it has reached an - /// ACTIVE status. If the instance is in any other status, an error will be - /// received. When you set a container instance to DRAINING, Amazon ECS prevents new tasks from being - /// scheduled for placement on the container instance and replacement service tasks are started on other - /// container instances in the cluster if the resources are available. Service tasks on the container - /// instance that are in the PENDING state are stopped immediately. Service tasks on the container instance that are in the RUNNING state are stopped and - /// replaced according to the service's deployment configuration parameters, - /// minimumHealthyPercent and maximumPercent. You can change the deployment - /// configuration of your service using UpdateService. If minimumHealthyPercent is below 100%, the scheduler can ignore + /// Modifies the status of an Amazon ECS container instance. Once a container instance has reached an ACTIVE state, you can change the + /// status of a container instance to DRAINING to manually remove an instance + /// from a cluster, for example to perform system updates, update the Docker daemon, or + /// scale down the cluster size. A container instance can't be changed to DRAINING until it has + /// reached an ACTIVE status. 
If the instance is in any other status, an + /// error will be received. When you set a container instance to DRAINING, Amazon ECS prevents new tasks + /// from being scheduled for placement on the container instance and replacement service + /// tasks are started on other container instances in the cluster if the resources are + /// available. Service tasks on the container instance that are in the PENDING + /// state are stopped immediately. Service tasks on the container instance that are in the RUNNING state are + /// stopped and replaced according to the service's deployment configuration parameters, + /// minimumHealthyPercent and maximumPercent. You can change + /// the deployment configuration of your service using UpdateService. If minimumHealthyPercent is below 100%, the scheduler can ignore /// desiredCount temporarily during task replacement. For example, - /// desiredCount is four tasks, a minimum of 50% allows the scheduler to stop two - /// existing tasks before starting two new tasks. If the minimum is 100%, the service scheduler - /// can't remove existing tasks until the replacement tasks are considered healthy. Tasks for - /// services that do not use a load balancer are considered healthy if they're in the - /// RUNNING state. Tasks for services that use a load balancer are considered - /// healthy if they're in the RUNNING state and are reported as healthy by the load - /// balancer. The maximumPercent parameter represents an upper limit on the number of running - /// tasks during task replacement. You can use this to define the replacement batch size. For - /// example, if desiredCount is four tasks, a maximum of 200% starts four new tasks - /// before stopping the four tasks to be drained, provided that the cluster resources required to - /// do this are available. If the maximum is 100%, then replacement tasks can't start until the - /// draining tasks have stopped. 
Any PENDING or RUNNING tasks that do not belong to a service aren't - /// affected. You must wait for them to finish or stop them manually. A container instance has completed draining when it has no more RUNNING tasks. You can - /// verify this using ListTasks. When a container instance has been drained, you can set a container instance to ACTIVE - /// status and once it has reached that status the Amazon ECS scheduler can begin scheduling tasks on the - /// instance again. + /// desiredCount is four tasks, a minimum of 50% allows the + /// scheduler to stop two existing tasks before starting two new tasks. If the + /// minimum is 100%, the service scheduler can't remove existing tasks until the + /// replacement tasks are considered healthy. Tasks for services that do not use a + /// load balancer are considered healthy if they're in the RUNNING + /// state. Tasks for services that use a load balancer are considered healthy if + /// they're in the RUNNING state and are reported as healthy by the + /// load balancer. The maximumPercent parameter represents an upper limit on the + /// number of running tasks during task replacement. You can use this to define the + /// replacement batch size. For example, if desiredCount is four tasks, + /// a maximum of 200% starts four new tasks before stopping the four tasks to be + /// drained, provided that the cluster resources required to do this are available. + /// If the maximum is 100%, then replacement tasks can't start until the draining + /// tasks have stopped. Any PENDING or RUNNING tasks that do not belong to a service + /// aren't affected. You must wait for them to finish or stop them manually. A container instance has completed draining when it has no more RUNNING + /// tasks. You can verify this using ListTasks. 
When a container instance has been drained, you can set a container instance to + /// ACTIVE status and once it has reached that status the Amazon ECS scheduler + /// can begin scheduling tasks on the instance again. /// /// Parameters: - /// - cluster: The short name or full Amazon Resource Name (ARN) of the cluster that hosts the container instance to update. + /// - cluster: The short name or full Amazon Resource Name (ARN) of the cluster that hosts the container instance to /// - containerInstances: A list of up to 10 container instance IDs or full ARN entries. - /// - status: The container instance state to update the container instance with. The only valid values for this + /// - status: The container instance state to update the container instance with. The only valid /// - logger: Logger use during operation @inlinable public func updateContainerInstancesState( @@ -2810,68 +2886,75 @@ public struct ECS: AWSService { return try await self.updateContainerInstancesState(input, logger: logger) } - /// Modifies the parameters of a service. On March 21, 2024, a change was made to resolve the task definition revision before authorization. When a task definition revision is not specified, authorization will occur using the latest revision of a task definition. For services using the rolling update (ECS) you can update the desired count, deployment - /// configuration, network configuration, load balancers, service registries, enable ECS managed tags - /// option, propagate tags option, task placement constraints and strategies, and task definition. When you - /// update any of these parameters, Amazon ECS starts new tasks with the new configuration. You can attach Amazon EBS volumes to Amazon ECS tasks by configuring the volume when starting or running a - /// task, or when creating or updating a service. For more infomation, see Amazon EBS - /// volumes in the Amazon Elastic Container Service Developer Guide. 
You can update your volume configurations and trigger a new - /// deployment. volumeConfigurations is only supported for REPLICA service and not DAEMON - /// service. If you leave volumeConfigurations null, it doesn't trigger a new deployment. For more infomation on volumes, see Amazon EBS - /// volumes in the Amazon Elastic Container Service Developer Guide. For services using the blue/green (CODE_DEPLOY) deployment controller, only the desired - /// count, deployment configuration, health check grace period, task placement constraints and strategies, - /// enable ECS managed tags option, and propagate tags can be updated using this API. If the network - /// configuration, platform version, task definition, or load balancer need to be updated, create a new - /// CodeDeploy deployment. For more information, see CreateDeployment in the - /// CodeDeploy API Reference. For services using an external deployment controller, you can update only the desired count, task - /// placement constraints and strategies, health check grace period, enable ECS managed tags option, and - /// propagate tags option, using this API. If the launch type, load balancer, network configuration, - /// platform version, or task definition need to be updated, create a new task set For more information, - /// see CreateTaskSet. You can add to or subtract from the number of instantiations of a task definition in a service by - /// specifying the cluster that the service is running in and a new desiredCount - /// parameter. You can attach Amazon EBS volumes to Amazon ECS tasks by configuring the volume when starting or running a - /// task, or when creating or updating a service. For more infomation, see Amazon EBS - /// volumes in the Amazon Elastic Container Service Developer Guide. If you have updated the container image of your application, you can create a new task definition - /// with that image and deploy it to your service. 
The service scheduler uses the minimum healthy percent - /// and maximum percent parameters (in the service's deployment configuration) to determine the deployment - /// strategy. If your updated Docker image uses the same tag as what is in the existing task definition for - /// your service (for example, my_image:latest), you don't need to create a new revision - /// of your task definition. You can update the service using the forceNewDeployment - /// option. The new tasks launched by the deployment pull the current image/tag combination from your - /// repository when they start. You can also update the deployment configuration of a service. When a deployment is triggered by - /// updating the task definition of a service, the service scheduler uses the deployment configuration - /// parameters, minimumHealthyPercent and maximumPercent, to determine the - /// deployment strategy. If minimumHealthyPercent is below 100%, the scheduler can ignore + /// Modifies the parameters of a service. On March 21, 2024, a change was made to resolve the task definition revision before authorization. When a task definition revision is not specified, authorization will occur using the latest revision of a task definition. For services using the rolling update (ECS) you can update the desired + /// count, deployment configuration, network configuration, load balancers, service + /// registries, enable ECS managed tags option, propagate tags option, task placement + /// constraints and strategies, and task definition. When you update any of these + /// parameters, Amazon ECS starts new tasks with the new configuration. You can attach Amazon EBS volumes to Amazon ECS tasks by configuring the volume when starting or + /// running a task, or when creating or updating a service. For more infomation, see Amazon EBS volumes in the Amazon Elastic Container Service Developer Guide. You can update + /// your volume configurations and trigger a new deployment. 
+ /// volumeConfigurations is only supported for REPLICA service and not + /// DAEMON service. If you leave volumeConfigurations null, it doesn't trigger a new deployment. For more information on volumes, + /// see Amazon EBS volumes in the Amazon Elastic Container Service Developer Guide. For services using the blue/green (CODE_DEPLOY) deployment controller, + /// only the desired count, deployment configuration, health check grace period, task + /// placement constraints and strategies, enable ECS managed tags option, and propagate tags + /// can be updated using this API. If the network configuration, platform version, task + /// definition, or load balancer need to be updated, create a new CodeDeploy deployment. For more + /// information, see CreateDeployment in the CodeDeploy API Reference. For services using an external deployment controller, you can update only the desired + /// count, task placement constraints and strategies, health check grace period, enable ECS + /// managed tags option, and propagate tags option, using this API. If the launch type, load + /// balancer, network configuration, platform version, or task definition need to be + /// updated, create a new task set. For more information, see CreateTaskSet. You can add to or subtract from the number of instantiations of a task definition in a + /// service by specifying the cluster that the service is running in and a new + /// desiredCount parameter. You can attach Amazon EBS volumes to Amazon ECS tasks by configuring the volume when starting or + /// running a task, or when creating or updating a service. For more information, see Amazon EBS volumes in the Amazon Elastic Container Service Developer Guide. If you have updated the container image of your application, you can create a new task + /// definition with that image and deploy it to your service. 
The service scheduler uses the + /// minimum healthy percent and maximum percent parameters (in the service's deployment + /// configuration) to determine the deployment strategy. If your updated Docker image uses the same tag as what is in the existing task + /// definition for your service (for example, my_image:latest), you don't + /// need to create a new revision of your task definition. You can update the service + /// using the forceNewDeployment option. The new tasks launched by the + /// deployment pull the current image/tag combination from your repository when they + /// start. You can also update the deployment configuration of a service. When a deployment is + /// triggered by updating the task definition of a service, the service scheduler uses the + /// deployment configuration parameters, minimumHealthyPercent and + /// maximumPercent, to determine the deployment strategy. If minimumHealthyPercent is below 100%, the scheduler can ignore /// desiredCount temporarily during a deployment. For example, if - /// desiredCount is four tasks, a minimum of 50% allows the scheduler to stop two - /// existing tasks before starting two new tasks. Tasks for services that don't use a load balancer - /// are considered healthy if they're in the RUNNING state. Tasks for services that - /// use a load balancer are considered healthy if they're in the RUNNING state and are - /// reported as healthy by the load balancer. The maximumPercent parameter represents an upper limit on the number of running - /// tasks during a deployment. You can use it to define the deployment batch size. For example, if - /// desiredCount is four tasks, a maximum of 200% starts four new tasks before - /// stopping the four older tasks (provided that the cluster resources required to do this are - /// available). When UpdateService stops a task during a deployment, the equivalent of docker stop - /// is issued to the containers running in the task. 
This results in a SIGTERM and a 30-second - /// timeout. After this, SIGKILL is sent and the containers are forcibly stopped. If the - /// container handles the SIGTERM gracefully and exits within 30 seconds from receiving it, no - /// SIGKILL is sent. When the service scheduler launches new tasks, it determines task placement in your cluster with the - /// following logic. Determine which of the container instances in your cluster can support your service's task - /// definition. For example, they have the required CPU, memory, ports, and container instance - /// attributes. By default, the service scheduler attempts to balance tasks across Availability Zones in this - /// manner even though you can choose a different placement strategy. Sort the valid container instances by the fewest number of running tasks for this - /// service in the same Availability Zone as the instance. For example, if zone A has one - /// running service task and zones B and C each have zero, valid container instances in - /// either zone B or C are considered optimal for placement. Place the new service task on a valid container instance in an optimal Availability - /// Zone (based on the previous steps), favoring container instances with the fewest number - /// of running tasks for this service. When the service scheduler stops running tasks, it attempts to maintain balance across the - /// Availability Zones in your cluster using the following logic: Sort the container instances by the largest number of running tasks for this service in the - /// same Availability Zone as the instance. For example, if zone A has one running service task and - /// zones B and C each have two, container instances in either zone B or C are considered optimal - /// for termination. Stop the task on a container instance in an optimal Availability Zone (based on the previous - /// steps), favoring container instances with the largest number of running tasks for this - /// service. 
You must have a service-linked role when you update any of the following service - /// properties: loadBalancers, serviceRegistries For more information about the role see the CreateService request parameter role . + /// desiredCount is four tasks, a minimum of 50% allows the + /// scheduler to stop two existing tasks before starting two new tasks. Tasks for + /// services that don't use a load balancer are considered healthy if they're in the + /// RUNNING state. Tasks for services that use a load balancer are + /// considered healthy if they're in the RUNNING state and are reported + /// as healthy by the load balancer. The maximumPercent parameter represents an upper limit on the + /// number of running tasks during a deployment. You can use it to define the + /// deployment batch size. For example, if desiredCount is four tasks, + /// a maximum of 200% starts four new tasks before stopping the four older tasks + /// (provided that the cluster resources required to do this are available). When UpdateService + /// stops a task during a deployment, the equivalent of docker stop is issued + /// to the containers running in the task. This results in a SIGTERM and a + /// 30-second timeout. After this, SIGKILL is sent and the containers are + /// forcibly stopped. If the container handles the SIGTERM gracefully and exits + /// within 30 seconds from receiving it, no SIGKILL is sent. When the service scheduler launches new tasks, it determines task placement in your + /// cluster with the following logic. Determine which of the container instances in your cluster can support your + /// service's task definition. For example, they have the required CPU, memory, + /// ports, and container instance attributes. By default, the service scheduler attempts to balance tasks across + /// Availability Zones in this manner even though you can choose a different + /// placement strategy. 
Sort the valid container instances by the fewest number of running + /// tasks for this service in the same Availability Zone as the instance. + /// For example, if zone A has one running service task and zones B and C + /// each have zero, valid container instances in either zone B or C are + /// considered optimal for placement. Place the new service task on a valid container instance in an optimal + /// Availability Zone (based on the previous steps), favoring container + /// instances with the fewest number of running tasks for this + /// service. When the service scheduler stops running tasks, it attempts to maintain balance across + /// the Availability Zones in your cluster using the following logic: Sort the container instances by the largest number of running tasks for this + /// service in the same Availability Zone as the instance. For example, if zone A + /// has one running service task and zones B and C each have two, container + /// instances in either zone B or C are considered optimal for termination. Stop the task on a container instance in an optimal Availability Zone (based + /// on the previous steps), favoring container instances with the largest number of + /// running tasks for this service. You must have a service-linked role when you update any of the following service + /// properties: loadBalancers, serviceRegistries For more information about the role see the CreateService request + /// parameter role . @Sendable @inlinable public func updateService(_ input: UpdateServiceRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> UpdateServiceResponse { @@ -2884,91 +2967,98 @@ public struct ECS: AWSService { logger: logger ) } - /// Modifies the parameters of a service. On March 21, 2024, a change was made to resolve the task definition revision before authorization. When a task definition revision is not specified, authorization will occur using the latest revision of a task definition. 
For services using the rolling update (ECS) you can update the desired count, deployment - /// configuration, network configuration, load balancers, service registries, enable ECS managed tags - /// option, propagate tags option, task placement constraints and strategies, and task definition. When you - /// update any of these parameters, Amazon ECS starts new tasks with the new configuration. You can attach Amazon EBS volumes to Amazon ECS tasks by configuring the volume when starting or running a - /// task, or when creating or updating a service. For more infomation, see Amazon EBS - /// volumes in the Amazon Elastic Container Service Developer Guide. You can update your volume configurations and trigger a new - /// deployment. volumeConfigurations is only supported for REPLICA service and not DAEMON - /// service. If you leave volumeConfigurations null, it doesn't trigger a new deployment. For more infomation on volumes, see Amazon EBS - /// volumes in the Amazon Elastic Container Service Developer Guide. For services using the blue/green (CODE_DEPLOY) deployment controller, only the desired - /// count, deployment configuration, health check grace period, task placement constraints and strategies, - /// enable ECS managed tags option, and propagate tags can be updated using this API. If the network - /// configuration, platform version, task definition, or load balancer need to be updated, create a new - /// CodeDeploy deployment. For more information, see CreateDeployment in the - /// CodeDeploy API Reference. For services using an external deployment controller, you can update only the desired count, task - /// placement constraints and strategies, health check grace period, enable ECS managed tags option, and - /// propagate tags option, using this API. If the launch type, load balancer, network configuration, - /// platform version, or task definition need to be updated, create a new task set For more information, - /// see CreateTaskSet. 
You can add to or subtract from the number of instantiations of a task definition in a service by - /// specifying the cluster that the service is running in and a new desiredCount - /// parameter. You can attach Amazon EBS volumes to Amazon ECS tasks by configuring the volume when starting or running a - /// task, or when creating or updating a service. For more infomation, see Amazon EBS - /// volumes in the Amazon Elastic Container Service Developer Guide. If you have updated the container image of your application, you can create a new task definition - /// with that image and deploy it to your service. The service scheduler uses the minimum healthy percent - /// and maximum percent parameters (in the service's deployment configuration) to determine the deployment - /// strategy. If your updated Docker image uses the same tag as what is in the existing task definition for - /// your service (for example, my_image:latest), you don't need to create a new revision - /// of your task definition. You can update the service using the forceNewDeployment - /// option. The new tasks launched by the deployment pull the current image/tag combination from your - /// repository when they start. You can also update the deployment configuration of a service. When a deployment is triggered by - /// updating the task definition of a service, the service scheduler uses the deployment configuration - /// parameters, minimumHealthyPercent and maximumPercent, to determine the - /// deployment strategy. If minimumHealthyPercent is below 100%, the scheduler can ignore + /// Modifies the parameters of a service. On March 21, 2024, a change was made to resolve the task definition revision before authorization. When a task definition revision is not specified, authorization will occur using the latest revision of a task definition. 
For services using the rolling update (ECS) you can update the desired + /// count, deployment configuration, network configuration, load balancers, service + /// registries, enable ECS managed tags option, propagate tags option, task placement + /// constraints and strategies, and task definition. When you update any of these + /// parameters, Amazon ECS starts new tasks with the new configuration. You can attach Amazon EBS volumes to Amazon ECS tasks by configuring the volume when starting or + /// running a task, or when creating or updating a service. For more information, see Amazon EBS volumes in the Amazon Elastic Container Service Developer Guide. You can update + /// your volume configurations and trigger a new deployment. + /// volumeConfigurations is only supported for REPLICA service and not + /// DAEMON service. If you leave volumeConfigurations null, it doesn't trigger a new deployment. For more information on volumes, + /// see Amazon EBS volumes in the Amazon Elastic Container Service Developer Guide. For services using the blue/green (CODE_DEPLOY) deployment controller, + /// only the desired count, deployment configuration, health check grace period, task + /// placement constraints and strategies, enable ECS managed tags option, and propagate tags + /// can be updated using this API. If the network configuration, platform version, task + /// definition, or load balancer need to be updated, create a new CodeDeploy deployment. For more + /// information, see CreateDeployment in the CodeDeploy API Reference. For services using an external deployment controller, you can update only the desired + /// count, task placement constraints and strategies, health check grace period, enable ECS + /// managed tags option, and propagate tags option, using this API. If the launch type, load + /// balancer, network configuration, platform version, or task definition need to be + /// updated, create a new task set. For more information, see CreateTaskSet. 
You can add to or subtract from the number of instantiations of a task definition in a + /// service by specifying the cluster that the service is running in and a new + /// desiredCount parameter. You can attach Amazon EBS volumes to Amazon ECS tasks by configuring the volume when starting or + /// running a task, or when creating or updating a service. For more information, see Amazon EBS volumes in the Amazon Elastic Container Service Developer Guide. If you have updated the container image of your application, you can create a new task + /// definition with that image and deploy it to your service. The service scheduler uses the + /// minimum healthy percent and maximum percent parameters (in the service's deployment + /// configuration) to determine the deployment strategy. If your updated Docker image uses the same tag as what is in the existing task + /// definition for your service (for example, my_image:latest), you don't + /// need to create a new revision of your task definition. You can update the service + /// using the forceNewDeployment option. The new tasks launched by the + /// deployment pull the current image/tag combination from your repository when they + /// start. You can also update the deployment configuration of a service. When a deployment is + /// triggered by updating the task definition of a service, the service scheduler uses the + /// deployment configuration parameters, minimumHealthyPercent and + /// maximumPercent, to determine the deployment strategy. If minimumHealthyPercent is below 100%, the scheduler can ignore /// desiredCount temporarily during a deployment. For example, if - /// desiredCount is four tasks, a minimum of 50% allows the scheduler to stop two - /// existing tasks before starting two new tasks. Tasks for services that don't use a load balancer - /// are considered healthy if they're in the RUNNING state. 
Tasks for services that - /// use a load balancer are considered healthy if they're in the RUNNING state and are - /// reported as healthy by the load balancer. The maximumPercent parameter represents an upper limit on the number of running - /// tasks during a deployment. You can use it to define the deployment batch size. For example, if - /// desiredCount is four tasks, a maximum of 200% starts four new tasks before - /// stopping the four older tasks (provided that the cluster resources required to do this are - /// available). When UpdateService stops a task during a deployment, the equivalent of docker stop - /// is issued to the containers running in the task. This results in a SIGTERM and a 30-second - /// timeout. After this, SIGKILL is sent and the containers are forcibly stopped. If the - /// container handles the SIGTERM gracefully and exits within 30 seconds from receiving it, no - /// SIGKILL is sent. When the service scheduler launches new tasks, it determines task placement in your cluster with the - /// following logic. Determine which of the container instances in your cluster can support your service's task - /// definition. For example, they have the required CPU, memory, ports, and container instance - /// attributes. By default, the service scheduler attempts to balance tasks across Availability Zones in this - /// manner even though you can choose a different placement strategy. Sort the valid container instances by the fewest number of running tasks for this - /// service in the same Availability Zone as the instance. For example, if zone A has one - /// running service task and zones B and C each have zero, valid container instances in - /// either zone B or C are considered optimal for placement. Place the new service task on a valid container instance in an optimal Availability - /// Zone (based on the previous steps), favoring container instances with the fewest number - /// of running tasks for this service. 
When the service scheduler stops running tasks, it attempts to maintain balance across the - /// Availability Zones in your cluster using the following logic: Sort the container instances by the largest number of running tasks for this service in the - /// same Availability Zone as the instance. For example, if zone A has one running service task and - /// zones B and C each have two, container instances in either zone B or C are considered optimal - /// for termination. Stop the task on a container instance in an optimal Availability Zone (based on the previous - /// steps), favoring container instances with the largest number of running tasks for this - /// service. You must have a service-linked role when you update any of the following service - /// properties: loadBalancers, serviceRegistries For more information about the role see the CreateService request parameter role . + /// desiredCount is four tasks, a minimum of 50% allows the + /// scheduler to stop two existing tasks before starting two new tasks. Tasks for + /// services that don't use a load balancer are considered healthy if they're in the + /// RUNNING state. Tasks for services that use a load balancer are + /// considered healthy if they're in the RUNNING state and are reported + /// as healthy by the load balancer. The maximumPercent parameter represents an upper limit on the + /// number of running tasks during a deployment. You can use it to define the + /// deployment batch size. For example, if desiredCount is four tasks, + /// a maximum of 200% starts four new tasks before stopping the four older tasks + /// (provided that the cluster resources required to do this are available). When UpdateService + /// stops a task during a deployment, the equivalent of docker stop is issued + /// to the containers running in the task. This results in a SIGTERM and a + /// 30-second timeout. After this, SIGKILL is sent and the containers are + /// forcibly stopped. 
If the container handles the SIGTERM gracefully and exits + /// within 30 seconds from receiving it, no SIGKILL is sent. When the service scheduler launches new tasks, it determines task placement in your + /// cluster with the following logic. Determine which of the container instances in your cluster can support your + /// service's task definition. For example, they have the required CPU, memory, + /// ports, and container instance attributes. By default, the service scheduler attempts to balance tasks across + /// Availability Zones in this manner even though you can choose a different + /// placement strategy. Sort the valid container instances by the fewest number of running + /// tasks for this service in the same Availability Zone as the instance. + /// For example, if zone A has one running service task and zones B and C + /// each have zero, valid container instances in either zone B or C are + /// considered optimal for placement. Place the new service task on a valid container instance in an optimal + /// Availability Zone (based on the previous steps), favoring container + /// instances with the fewest number of running tasks for this + /// service. When the service scheduler stops running tasks, it attempts to maintain balance across + /// the Availability Zones in your cluster using the following logic: Sort the container instances by the largest number of running tasks for this + /// service in the same Availability Zone as the instance. For example, if zone A + /// has one running service task and zones B and C each have two, container + /// instances in either zone B or C are considered optimal for termination. Stop the task on a container instance in an optimal Availability Zone (based + /// on the previous steps), favoring container instances with the largest number of + /// running tasks for this service. 
You must have a service-linked role when you update any of the following service + /// properties: loadBalancers, serviceRegistries For more information about the role see the CreateService request + /// parameter role . /// /// Parameters: /// - availabilityZoneRebalancing: Indicates whether to use Availability Zone rebalancing for the service. For more information, see Balancing an Amazon ECS service across Availability Zones in - /// - capacityProviderStrategy: The capacity provider strategy to update the service to use. if the service uses the default capacity provider strategy for the cluster, the service can be + /// - capacityProviderStrategy: The capacity provider strategy to update the service to use. if the service uses the default capacity provider strategy for the cluster, the /// - cluster: The short name or full Amazon Resource Name (ARN) of the cluster that your service runs on. - /// - deploymentConfiguration: Optional deployment parameters that control how many tasks run during the deployment and the ordering - /// - desiredCount: The number of instantiations of the task to place and keep running in your service. - /// - enableECSManagedTags: Determines whether to turn on Amazon ECS managed tags for the tasks in the service. For more information, - /// - enableExecuteCommand: If true, this enables execute command functionality on all task containers. If you do not want to override the value that was set when the service was created, you can set this - /// - forceNewDeployment: Determines whether to force a new deployment of the service. By default, deployments aren't forced. - /// - healthCheckGracePeriodSeconds: The period of time, in seconds, that the Amazon ECS service scheduler ignores unhealthy Elastic Load Balancing, VPC Lattice, and container - /// - loadBalancers: A list of Elastic Load Balancing load balancer objects. 
It contains the load balancer name, the container name, and + /// - deploymentConfiguration: Optional deployment parameters that control how many tasks run during the deployment + /// - desiredCount: The number of instantiations of the task to place and keep running in your + /// - enableECSManagedTags: Determines whether to turn on Amazon ECS managed tags for the tasks in the service. For + /// - enableExecuteCommand: If true, this enables execute command functionality on all task + /// - forceNewDeployment: Determines whether to force a new deployment of the service. By default, deployments + /// - healthCheckGracePeriodSeconds: The period of time, in seconds, that the Amazon ECS service scheduler ignores unhealthy + /// - loadBalancers: A list of Elastic Load Balancing load balancer objects. It contains the load balancer name, the /// - networkConfiguration: An object representing the network configuration for the service. - /// - placementConstraints: An array of task placement constraint objects to update the service to use. If no value is specified, - /// - placementStrategy: The task placement strategy objects to update the service to use. If no value is specified, the - /// - platformVersion: The platform version that your tasks in the service run on. A platform version is only specified for - /// - propagateTags: Determines whether to propagate the tags from the task definition or the service to the task. If no + /// - placementConstraints: An array of task placement constraint objects to update the service to use. If no + /// - placementStrategy: The task placement strategy objects to update the service to use. If no value is + /// - platformVersion: The platform version that your tasks in the service run on. A platform version is only + /// - propagateTags: Determines whether to propagate the tags from the task definition or the service to /// - service: The name of the service to update. 
/// - serviceConnectConfiguration: The configuration for this service to discover and connect to - /// - serviceRegistries: The details for the service discovery registries to assign to this service. For more information, see - /// - taskDefinition: The family and revision (family:revision) or full ARN of the - /// - volumeConfigurations: The details of the volume that was configuredAtLaunch. You can configure the size, - /// - vpcLatticeConfigurations: An object representing the VPC Lattice configuration for the service being updated. + /// - serviceRegistries: The details for the service discovery registries to assign to this service. For more + /// - taskDefinition: The family and revision (family:revision) or + /// - volumeConfigurations: The details of the volume that was configuredAtLaunch. You can configure + /// - vpcLatticeConfigurations: An object representing the VPC Lattice configuration for the service being /// - logger: Logger use during operation @inlinable public func updateService( @@ -3021,9 +3111,10 @@ public struct ECS: AWSService { return try await self.updateService(input, logger: logger) } - /// Modifies which task set in a service is the primary task set. Any parameters that are updated on the - /// primary task set in a service will transition to the service. This is used when a service uses the - /// EXTERNAL deployment controller type. For more information, see Amazon ECS Deployment + /// Modifies which task set in a service is the primary task set. Any parameters that are + /// updated on the primary task set in a service will transition to the service. This is + /// used when a service uses the EXTERNAL deployment controller type. For more + /// information, see Amazon ECS Deployment /// Types in the Amazon Elastic Container Service Developer Guide. @Sendable @inlinable @@ -3037,13 +3128,14 @@ public struct ECS: AWSService { logger: logger ) } - /// Modifies which task set in a service is the primary task set. 
Any parameters that are updated on the - /// primary task set in a service will transition to the service. This is used when a service uses the - /// EXTERNAL deployment controller type. For more information, see Amazon ECS Deployment + /// Modifies which task set in a service is the primary task set. Any parameters that are + /// updated on the primary task set in a service will transition to the service. This is + /// used when a service uses the EXTERNAL deployment controller type. For more + /// information, see Amazon ECS Deployment /// Types in the Amazon Elastic Container Service Developer Guide. /// /// Parameters: - /// - cluster: The short name or full Amazon Resource Name (ARN) of the cluster that hosts the service that the task set exists + /// - cluster: The short name or full Amazon Resource Name (ARN) of the cluster that hosts the service that the task /// - primaryTaskSet: The short name or full Amazon Resource Name (ARN) of the task set to set as the primary task set in the /// - service: The short name or full Amazon Resource Name (ARN) of the service that the task set exists in. /// - logger: Logger use during operation @@ -3063,18 +3155,21 @@ public struct ECS: AWSService { } /// Updates the protection status of a task. You can set protectionEnabled to - /// true to protect your task from termination during scale-in events from Service + /// true to protect your task from termination during scale-in events from + /// Service /// Autoscaling or deployments. Task-protection, by default, expires after 2 hours at which point Amazon ECS clears the - /// protectionEnabled property making the task eligible for termination by a subsequent - /// scale-in event. You can specify a custom expiration period for task protection from 1 minute to up to 2,880 minutes - /// (48 hours). To specify the custom expiration period, set the expiresInMinutes property. 
- /// The expiresInMinutes property is always reset when you invoke this operation for a task - /// that already has protectionEnabled set to true. You can keep extending the + /// protectionEnabled property making the task eligible for termination by + /// a subsequent scale-in event. You can specify a custom expiration period for task protection from 1 minute to up to + /// 2,880 minutes (48 hours). To specify the custom expiration period, set the + /// expiresInMinutes property. The expiresInMinutes property + /// is always reset when you invoke this operation for a task that already has + /// protectionEnabled set to true. You can keep extending the /// protection expiration period of a task by invoking this operation repeatedly. To learn more about Amazon ECS task protection, see Task scale-in - /// protection in the Amazon Elastic Container Service Developer Guide . This operation is only supported for tasks belonging to an Amazon ECS service. Invoking this operation - /// for a standalone task will result in an TASK_NOT_VALID failure. For more information, - /// see API failure reasons. If you prefer to set task protection from within the container, we recommend using the Task scale-in - /// protection endpoint. + /// protection in the Amazon Elastic Container Service Developer Guide . This operation is only supported for tasks belonging to an Amazon ECS service. Invoking + /// this operation for a standalone task will result in an TASK_NOT_VALID + /// failure. For more information, see API failure + /// reasons. If you prefer to set task protection from within the container, we recommend using + /// the Task scale-in protection endpoint. @Sendable @inlinable public func updateTaskProtection(_ input: UpdateTaskProtectionRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> UpdateTaskProtectionResponse { @@ -3088,23 +3183,26 @@ public struct ECS: AWSService { ) } /// Updates the protection status of a task. 
You can set protectionEnabled to - /// true to protect your task from termination during scale-in events from Service + /// true to protect your task from termination during scale-in events from + /// Service /// Autoscaling or deployments. Task-protection, by default, expires after 2 hours at which point Amazon ECS clears the - /// protectionEnabled property making the task eligible for termination by a subsequent - /// scale-in event. You can specify a custom expiration period for task protection from 1 minute to up to 2,880 minutes - /// (48 hours). To specify the custom expiration period, set the expiresInMinutes property. - /// The expiresInMinutes property is always reset when you invoke this operation for a task - /// that already has protectionEnabled set to true. You can keep extending the + /// protectionEnabled property making the task eligible for termination by + /// a subsequent scale-in event. You can specify a custom expiration period for task protection from 1 minute to up to + /// 2,880 minutes (48 hours). To specify the custom expiration period, set the + /// expiresInMinutes property. The expiresInMinutes property + /// is always reset when you invoke this operation for a task that already has + /// protectionEnabled set to true. You can keep extending the /// protection expiration period of a task by invoking this operation repeatedly. To learn more about Amazon ECS task protection, see Task scale-in - /// protection in the Amazon Elastic Container Service Developer Guide . This operation is only supported for tasks belonging to an Amazon ECS service. Invoking this operation - /// for a standalone task will result in an TASK_NOT_VALID failure. For more information, - /// see API failure reasons. If you prefer to set task protection from within the container, we recommend using the Task scale-in - /// protection endpoint. + /// protection in the Amazon Elastic Container Service Developer Guide . 
This operation is only supported for tasks belonging to an Amazon ECS service. Invoking + /// this operation for a standalone task will result in an TASK_NOT_VALID + /// failure. For more information, see API failure + /// reasons. If you prefer to set task protection from within the container, we recommend using + /// the Task scale-in protection endpoint. /// /// Parameters: - /// - cluster: The short name or full Amazon Resource Name (ARN) of the cluster that hosts the service that the task sets exist - /// - expiresInMinutes: If you set protectionEnabled to true, you can specify the duration for task - /// - protectionEnabled: Specify true to mark a task for protection and false to unset protection, + /// - cluster: The short name or full Amazon Resource Name (ARN) of the cluster that hosts the service that the task + /// - expiresInMinutes: If you set protectionEnabled to true, you can specify the + /// - protectionEnabled: Specify true to mark a task for protection and false to /// - tasks: A list of up to 10 task IDs or full ARN entries. /// - logger: Logger use during operation @inlinable @@ -3124,9 +3222,9 @@ public struct ECS: AWSService { return try await self.updateTaskProtection(input, logger: logger) } - /// Modifies a task set. This is used when a service uses the EXTERNAL deployment controller - /// type. For more information, see Amazon ECS Deployment Types in the - /// Amazon Elastic Container Service Developer Guide. + /// Modifies a task set. This is used when a service uses the EXTERNAL + /// deployment controller type. For more information, see Amazon ECS Deployment + /// Types in the Amazon Elastic Container Service Developer Guide. @Sendable @inlinable public func updateTaskSet(_ input: UpdateTaskSetRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> UpdateTaskSetResponse { @@ -3139,13 +3237,13 @@ public struct ECS: AWSService { logger: logger ) } - /// Modifies a task set. 
This is used when a service uses the EXTERNAL deployment controller - /// type. For more information, see Amazon ECS Deployment Types in the - /// Amazon Elastic Container Service Developer Guide. + /// Modifies a task set. This is used when a service uses the EXTERNAL + /// deployment controller type. For more information, see Amazon ECS Deployment + /// Types in the Amazon Elastic Container Service Developer Guide. /// /// Parameters: - /// - cluster: The short name or full Amazon Resource Name (ARN) of the cluster that hosts the service that the task set is found - /// - scale: A floating-point percentage of the desired number of tasks to place and keep running in the task + /// - cluster: The short name or full Amazon Resource Name (ARN) of the cluster that hosts the service that the task + /// - scale: A floating-point percentage of the desired number of tasks to place and keep running /// - service: The short name or full Amazon Resource Name (ARN) of the service that the task set is found in. /// - taskSet: The short name or full Amazon Resource Name (ARN) of the task set to update. /// - logger: Logger use during operation @@ -3201,11 +3299,11 @@ extension ECS { /// Return PaginatorSequence for operation ``listAccountSettings(_:logger:)``. /// /// - Parameters: - /// - effectiveSettings: Determines whether to return the effective settings. If true, the account settings for - /// - maxResults: The maximum number of account setting results returned by ListAccountSettings in + /// - effectiveSettings: Determines whether to return the effective settings. If true, the account + /// - maxResults: The maximum number of account setting results returned by /// - name: The name of the account setting you want to list the settings for. - /// - principalArn: The ARN of the principal, which can be a user, role, or the root user. If this field is omitted, the - /// - value: The value of the account settings to filter results with. 
You must also specify an account setting + /// - principalArn: The ARN of the principal, which can be a user, role, or the root user. If this field is + /// - value: The value of the account settings to filter results with. You must also specify an /// - logger: Logger used for logging @inlinable public func listAccountSettingsPaginator( @@ -3248,9 +3346,9 @@ extension ECS { /// /// - Parameters: /// - attributeName: The name of the attribute to filter the results with. - /// - attributeValue: The value of the attribute to filter results with. You must also specify an attribute name to use - /// - cluster: The short name or full Amazon Resource Name (ARN) of the cluster to list attributes. If you do not specify a cluster, the default cluster is assumed. - /// - maxResults: The maximum number of cluster results that ListAttributes returned in paginated output. + /// - attributeValue: The value of the attribute to filter results with. You must also specify an attribute + /// - cluster: The short name or full Amazon Resource Name (ARN) of the cluster to list attributes. + /// - maxResults: The maximum number of cluster results that ListAttributes returned in /// - targetType: The type of the target to list attributes with. /// - logger: Logger used for logging @inlinable @@ -3293,7 +3391,7 @@ extension ECS { /// Return PaginatorSequence for operation ``listClusters(_:logger:)``. /// /// - Parameters: - /// - maxResults: The maximum number of cluster results that ListClusters returned in paginated output. + /// - maxResults: The maximum number of cluster results that ListClusters returned in /// - logger: Logger used for logging @inlinable public func listClustersPaginator( @@ -3327,10 +3425,10 @@ extension ECS { /// Return PaginatorSequence for operation ``listContainerInstances(_:logger:)``. /// /// - Parameters: - /// - cluster: The short name or full Amazon Resource Name (ARN) of the cluster that hosts the container instances to list. 
- /// - filter: You can filter the results of a ListContainerInstances operation with cluster query - /// - maxResults: The maximum number of container instance results that ListContainerInstances returned in - /// - status: Filters the container instances by status. For example, if you specify the DRAINING + /// - cluster: The short name or full Amazon Resource Name (ARN) of the cluster that hosts the container instances to + /// - filter: You can filter the results of a ListContainerInstances operation with + /// - maxResults: The maximum number of container instance results that + /// - status: Filters the container instances by status. For example, if you specify the /// - logger: Logger used for logging @inlinable public func listContainerInstancesPaginator( @@ -3370,10 +3468,10 @@ extension ECS { /// Return PaginatorSequence for operation ``listServices(_:logger:)``. /// /// - Parameters: - /// - cluster: The short name or full Amazon Resource Name (ARN) of the cluster to use when filtering the ListServices + /// - cluster: The short name or full Amazon Resource Name (ARN) of the cluster to use when filtering the /// - launchType: The launch type to use when filtering the ListServices results. - /// - maxResults: The maximum number of service results that ListServices returned in paginated output. - /// - schedulingStrategy: The scheduling strategy to use when filtering the ListServices results. + /// - maxResults: The maximum number of service results that ListServices returned in + /// - schedulingStrategy: The scheduling strategy to use when filtering the ListServices /// - logger: Logger used for logging @inlinable public func listServicesPaginator( @@ -3413,7 +3511,7 @@ extension ECS { /// Return PaginatorSequence for operation ``listServicesByNamespace(_:logger:)``. 
/// /// - Parameters: - /// - maxResults: The maximum number of service results that ListServicesByNamespace returns in paginated + /// - maxResults: The maximum number of service results that ListServicesByNamespace /// - namespace: The namespace name or full Amazon Resource Name (ARN) of the Cloud Map namespace to list the services in. Tasks that run in a namespace can use short names to connect /// - logger: Logger used for logging @inlinable @@ -3451,8 +3549,8 @@ extension ECS { /// /// - Parameters: /// - familyPrefix: The familyPrefix is a string that's used to filter the results of - /// - maxResults: The maximum number of task definition family results that ListTaskDefinitionFamilies - /// - status: The task definition family status to filter the ListTaskDefinitionFamilies results with. + /// - maxResults: The maximum number of task definition family results that + /// - status: The task definition family status to filter the /// - logger: Logger used for logging @inlinable public func listTaskDefinitionFamiliesPaginator( @@ -3490,10 +3588,10 @@ extension ECS { /// Return PaginatorSequence for operation ``listTaskDefinitions(_:logger:)``. /// /// - Parameters: - /// - familyPrefix: The full family name to filter the ListTaskDefinitions results with. Specifying a - /// - maxResults: The maximum number of task definition results that ListTaskDefinitions returned in - /// - sort: The order to sort the results in. Valid values are ASC and DESC. By - /// - status: The task definition status to filter the ListTaskDefinitions results with. By default, + /// - familyPrefix: The full family name to filter the ListTaskDefinitions results with. + /// - maxResults: The maximum number of task definition results that ListTaskDefinitions + /// - sort: The order to sort the results in. 
Valid values are ASC and + /// - status: The task definition status to filter the ListTaskDefinitions results /// - logger: Logger used for logging @inlinable public func listTaskDefinitionsPaginator( @@ -3533,14 +3631,14 @@ extension ECS { /// Return PaginatorSequence for operation ``listTasks(_:logger:)``. /// /// - Parameters: - /// - cluster: The short name or full Amazon Resource Name (ARN) of the cluster to use when filtering the ListTasks - /// - containerInstance: The container instance ID or full ARN of the container instance to use when filtering the - /// - desiredStatus: The task desired status to use when filtering the ListTasks results. Specifying a - /// - family: The name of the task definition family to use when filtering the ListTasks results. + /// - cluster: The short name or full Amazon Resource Name (ARN) of the cluster to use when filtering the + /// - containerInstance: The container instance ID or full ARN of the container instance to use when + /// - desiredStatus: The task desired status to use when filtering the ListTasks results. + /// - family: The name of the task definition family to use when filtering the /// - launchType: The launch type to use when filtering the ListTasks results. - /// - maxResults: The maximum number of task results that ListTasks returned in paginated output. When - /// - serviceName: The name of the service to use when filtering the ListTasks results. Specifying a - /// - startedBy: The startedBy value to filter the task results with. Specifying a startedBy + /// - maxResults: The maximum number of task results that ListTasks returned in paginated + /// - serviceName: The name of the service to use when filtering the ListTasks results. + /// - startedBy: The startedBy value to filter the task results with. 
Specifying a /// - logger: Logger used for logging @inlinable public func listTasksPaginator( @@ -3714,8 +3812,8 @@ extension ECS { /// /// - Parameters: /// - cluster: The short name or full Amazon Resource Name (ARN)the cluster that hosts the service to describe. - /// - include: Determines whether you want to see the resource tags for the service. If TAGS is - /// - services: A list of services to describe. You may specify up to 10 services to describe in a single + /// - include: Determines whether you want to see the resource tags for the service. If + /// - services: A list of services to describe. You may specify up to 10 services to describe in a /// - logger: Logger used for logging @inlinable public func waitUntilServicesInactive( @@ -3759,8 +3857,8 @@ extension ECS { /// /// - Parameters: /// - cluster: The short name or full Amazon Resource Name (ARN)the cluster that hosts the service to describe. - /// - include: Determines whether you want to see the resource tags for the service. If TAGS is - /// - services: A list of services to describe. You may specify up to 10 services to describe in a single + /// - include: Determines whether you want to see the resource tags for the service. If + /// - services: A list of services to describe. You may specify up to 10 services to describe in a /// - logger: Logger used for logging @inlinable public func waitUntilServicesStable( @@ -3802,8 +3900,8 @@ extension ECS { /// Waiter for operation ``describeTasks(_:logger:)``. /// /// - Parameters: - /// - cluster: The short name or full Amazon Resource Name (ARN) of the cluster that hosts the task or tasks to describe. - /// - include: Specifies whether you want to see the resource tags for the task. If TAGS is specified, + /// - cluster: The short name or full Amazon Resource Name (ARN) of the cluster that hosts the task or tasks to + /// - include: Specifies whether you want to see the resource tags for the task. 
If TAGS /// - tasks: A list of up to 100 task IDs or full ARN entries. /// - logger: Logger used for logging @inlinable @@ -3844,8 +3942,8 @@ extension ECS { /// Waiter for operation ``describeTasks(_:logger:)``. /// /// - Parameters: - /// - cluster: The short name or full Amazon Resource Name (ARN) of the cluster that hosts the task or tasks to describe. - /// - include: Specifies whether you want to see the resource tags for the task. If TAGS is specified, + /// - cluster: The short name or full Amazon Resource Name (ARN) of the cluster that hosts the task or tasks to + /// - include: Specifies whether you want to see the resource tags for the task. If TAGS /// - tasks: A list of up to 100 task IDs or full ARN entries. /// - logger: Logger used for logging @inlinable diff --git a/Sources/Soto/Services/ECS/ECS_shapes.swift b/Sources/Soto/Services/ECS/ECS_shapes.swift index 896788fdbf..f730fddc17 100644 --- a/Sources/Soto/Services/ECS/ECS_shapes.swift +++ b/Sources/Soto/Services/ECS/ECS_shapes.swift @@ -490,17 +490,20 @@ extension ECS { // MARK: Shapes public struct Attachment: AWSDecodableShape { - /// Details of the attachment. For elastic network interfaces, this includes the network interface ID, the MAC address, the subnet - /// ID, and the private IPv4 address. For Service Connect services, this includes portName, clientAliases, - /// discoveryName, and ingressPortOverride. For Elastic Block Storage, this includes roleArn, deleteOnTermination, - /// volumeName, volumeId, and statusReason (only when the - /// attachment fails to create or attach). + /// Details of the attachment. For elastic network interfaces, this includes the network interface ID, the MAC + /// address, the subnet ID, and the private IPv4 address. For Service Connect services, this includes portName, + /// clientAliases, discoveryName, and + /// ingressPortOverride. 
For Elastic Block Storage, this includes roleArn, + /// deleteOnTermination, volumeName, volumeId, + /// and statusReason (only when the attachment fails to create or + /// attach). public let details: [KeyValuePair]? /// The unique identifier for the attachment. public let id: String? - /// The status of the attachment. Valid values are PRECREATED, CREATED, - /// ATTACHING, ATTACHED, DETACHING, DETACHED, - /// DELETED, and FAILED. + /// The status of the attachment. Valid values are PRECREATED, + /// CREATED, ATTACHING, ATTACHED, + /// DETACHING, DETACHED, DELETED, and + /// FAILED. public let status: String? /// The type of the attachment, such as ElasticNetworkInterface, /// Service Connect, and AmazonElasticBlockStorage. @@ -541,19 +544,20 @@ extension ECS { } public struct Attribute: AWSEncodableShape & AWSDecodableShape { - /// The name of the attribute. The name must contain between 1 and 128 characters. The name - /// may contain letters (uppercase and lowercase), numbers, hyphens (-), underscores (_), forward slashes - /// (/), back slashes (\), or periods (.). + /// The name of the attribute. The name must contain between 1 and 128 + /// characters. The name may contain letters (uppercase and lowercase), numbers, hyphens + /// (-), underscores (_), forward slashes (/), back slashes (\), or periods (.). public let name: String - /// The ID of the target. You can specify the short form ID for a resource or the full Amazon Resource Name (ARN). + /// The ID of the target. You can specify the short form ID for a resource or the full + /// Amazon Resource Name (ARN). public let targetId: String? - /// The type of the target to attach the attribute with. This parameter is required if you use the short - /// form ID for a resource instead of the full ARN. + /// The type of the target to attach the attribute with. This parameter is required if you + /// use the short form ID for a resource instead of the full ARN. public let targetType: TargetType? 
- /// The value of the attribute. The value must contain between 1 and 128 characters. It can - /// contain letters (uppercase and lowercase), numbers, hyphens (-), underscores (_), periods (.), at signs - /// (@), forward slashes (/), back slashes (\), colons (:), or spaces. The value can't start or end with a - /// space. + /// The value of the attribute. The value must contain between 1 and 128 + /// characters. It can contain letters (uppercase and lowercase), numbers, hyphens (-), + /// underscores (_), periods (.), at signs (@), forward slashes (/), back slashes (\), + /// colons (:), or spaces. The value can't start or end with a space. public let value: String? @inlinable @@ -573,21 +577,21 @@ extension ECS { } public struct AutoScalingGroupProvider: AWSEncodableShape & AWSDecodableShape { - /// The Amazon Resource Name (ARN) that identifies the Auto Scaling group, or the Auto Scaling group name. + /// The Amazon Resource Name (ARN) that identifies the Auto Scaling group, or the Auto Scaling group + /// name. public let autoScalingGroupArn: String /// The managed draining option for the Auto Scaling group capacity provider. When you enable this, Amazon ECS manages and gracefully drains the EC2 container instances that are in the Auto Scaling group capacity provider. public let managedDraining: ManagedDraining? /// The managed scaling settings for the Auto Scaling group capacity provider. public let managedScaling: ManagedScaling? - /// The managed termination protection setting to use for the Auto Scaling group capacity provider. This - /// determines whether the Auto Scaling group has managed termination protection. The default is - /// off. When using managed termination protection, managed scaling must also be used otherwise managed - /// termination protection doesn't work. 
When managed termination protection is on, Amazon ECS prevents the Amazon EC2 instances in an Auto Scaling - /// group that contain tasks from being terminated during a scale-in action. The Auto Scaling group and - /// each instance in the Auto Scaling group must have instance protection from scale-in actions on as well. - /// For more information, see Instance - /// Protection in the Auto Scaling User Guide. When managed termination protection is off, your Amazon EC2 instances aren't protected from termination - /// when the Auto Scaling group scales in. + /// The managed termination protection setting to use for the Auto Scaling group capacity + /// provider. This determines whether the Auto Scaling group has managed termination + /// protection. The default is off. When using managed termination protection, managed scaling must also be used + /// otherwise managed termination protection doesn't work. When managed termination protection is on, Amazon ECS prevents the Amazon EC2 instances in an + /// Auto Scaling group that contain tasks from being terminated during a scale-in action. + /// The Auto Scaling group and each instance in the Auto Scaling group must have instance + /// protection from scale-in actions on as well. For more information, see Instance Protection in the Auto Scaling User Guide. When managed termination protection is off, your Amazon EC2 instances aren't protected from + /// termination when the Auto Scaling group scales in. public let managedTerminationProtection: ManagedTerminationProtection? @inlinable @@ -615,14 +619,14 @@ extension ECS { public let managedDraining: ManagedDraining? /// The managed scaling settings for the Auto Scaling group capacity provider. public let managedScaling: ManagedScaling? - /// The managed termination protection setting to use for the Auto Scaling group capacity provider. This - /// determines whether the Auto Scaling group has managed termination protection. 
When using managed termination protection, managed scaling must also be used otherwise managed - /// termination protection doesn't work. When managed termination protection is on, Amazon ECS prevents the Amazon EC2 instances in an Auto Scaling - /// group that contain tasks from being terminated during a scale-in action. The Auto Scaling group and - /// each instance in the Auto Scaling group must have instance protection from scale-in actions on. For - /// more information, see Instance - /// Protection in the Auto Scaling User Guide. When managed termination protection is off, your Amazon EC2 instances aren't protected from termination - /// when the Auto Scaling group scales in. + /// The managed termination protection setting to use for the Auto Scaling group capacity + /// provider. This determines whether the Auto Scaling group has managed termination + /// protection. When using managed termination protection, managed scaling must also be used + /// otherwise managed termination protection doesn't work. When managed termination protection is on, Amazon ECS prevents the Amazon EC2 instances in an + /// Auto Scaling group that contain tasks from being terminated during a scale-in action. + /// The Auto Scaling group and each instance in the Auto Scaling group must have instance + /// protection from scale-in actions on. For more information, see Instance Protection in the Auto Scaling User Guide. When managed termination protection is off, your Amazon EC2 instances aren't protected from + /// termination when the Auto Scaling group scales in. public let managedTerminationProtection: ManagedTerminationProtection? @inlinable @@ -644,15 +648,16 @@ extension ECS { } public struct AwsVpcConfiguration: AWSEncodableShape & AWSDecodableShape { - /// Whether the task's elastic network interface receives a public IP address. The default value is - /// ENABLED. + /// Whether the task's elastic network interface receives a public IP address. 
The default + /// value is ENABLED. public let assignPublicIp: AssignPublicIp? - /// The IDs of the security groups associated with the task or service. If you don't specify a security - /// group, the default security group for the VPC is used. There's a limit of 5 security groups that can be - /// specified per awsvpcConfiguration. All specified security groups must be from the same VPC. + /// The IDs of the security groups associated with the task or service. If you don't + /// specify a security group, the default security group for the VPC is used. There's a + /// limit of 5 security groups that can be specified per + /// awsvpcConfiguration. All specified security groups must be from the same VPC. public let securityGroups: [String]? - /// The IDs of the subnets associated with the task or service. There's a limit of 16 subnets that can be - /// specified per awsvpcConfiguration. All specified subnets must be from the same VPC. + /// The IDs of the subnets associated with the task or service. There's a limit of 16 + /// subnets that can be specified per awsvpcConfiguration. All specified subnets must be from the same VPC. public let subnets: [String] @inlinable @@ -676,20 +681,20 @@ extension ECS { public let capacityProviderArn: String? /// The name of the capacity provider. public let name: String? - /// The current status of the capacity provider. Only capacity providers in an ACTIVE state - /// can be used in a cluster. When a capacity provider is successfully deleted, it has an - /// INACTIVE status. + /// The current status of the capacity provider. Only capacity providers in an + /// ACTIVE state can be used in a cluster. When a capacity provider is + /// successfully deleted, it has an INACTIVE status. public let status: CapacityProviderStatus? - /// The metadata that you apply to the capacity provider to help you categorize and organize it. Each tag - /// consists of a key and an optional value. You define both. 
The following basic restrictions apply to tags: Maximum number of tags per resource - 50 For each resource, each tag key must be unique, and each tag key can have only one value. Maximum key length - 128 Unicode characters in UTF-8 Maximum value length - 256 Unicode characters in UTF-8 If your tagging schema is used across multiple services and resources, remember that other services may have restrictions on allowed characters. Generally allowed characters are: letters, numbers, and spaces representable in UTF-8, and the following characters: + - = . _ : / @. Tag keys and values are case-sensitive. Do not use aws:, AWS:, or any upper or lowercase combination of such as a prefix for either keys or values as it is reserved for Amazon Web Services use. You cannot edit or delete tag keys or values with this prefix. Tags with this prefix do not count against your tags per resource limit. + /// The metadata that you apply to the capacity provider to help you categorize and + /// organize it. Each tag consists of a key and an optional value. You define both. The following basic restrictions apply to tags: Maximum number of tags per resource - 50 For each resource, each tag key must be unique, and each tag key can have only one value. Maximum key length - 128 Unicode characters in UTF-8 Maximum value length - 256 Unicode characters in UTF-8 If your tagging schema is used across multiple services and resources, remember that other services may have restrictions on allowed characters. Generally allowed characters are: letters, numbers, and spaces representable in UTF-8, and the following characters: + - = . _ : / @. Tag keys and values are case-sensitive. Do not use aws:, AWS:, or any upper or lowercase combination of such as a prefix for either keys or values as it is reserved for Amazon Web Services use. You cannot edit or delete tag keys or values with this prefix. Tags with this prefix do not count against your tags per resource limit. public let tags: [Tag]? 
- /// The update status of the capacity provider. The following are the possible states that is - /// returned. DELETE_IN_PROGRESS The capacity provider is in the process of being deleted. DELETE_COMPLETE The capacity provider was successfully deleted and has an INACTIVE - /// status. DELETE_FAILED The capacity provider can't be deleted. The update status reason provides further details - /// about why the delete failed. + /// The update status of the capacity provider. The following are the possible states that + /// is returned. DELETE_IN_PROGRESS The capacity provider is in the process of being deleted. DELETE_COMPLETE The capacity provider was successfully deleted and has an + /// INACTIVE status. DELETE_FAILED The capacity provider can't be deleted. The update status reason provides + /// further details about why the delete failed. public let updateStatus: CapacityProviderUpdateStatus? - /// The update status reason. This provides further details about the update status for the capacity - /// provider. + /// The update status reason. This provides further details about the update status for + /// the capacity provider. public let updateStatusReason: String? @inlinable @@ -715,27 +720,30 @@ extension ECS { } public struct CapacityProviderStrategyItem: AWSEncodableShape & AWSDecodableShape { - /// The base value designates how many tasks, at a minimum, to run on the specified - /// capacity provider. Only one capacity provider in a capacity provider strategy can have a - /// base defined. If no value is specified, the default value of 0 is - /// used. + /// The base value designates how many tasks, at a minimum, to run on + /// the specified capacity provider. Only one capacity provider in a capacity provider + /// strategy can have a base defined. If no value is specified, the + /// default value of 0 is used. public let base: Int? /// The short name of the capacity provider. 
public let capacityProvider: String - /// The weight value designates the relative percentage of the total number of tasks - /// launched that should use the specified capacity provider. The weight value is taken into - /// consideration after the base value, if defined, is satisfied. If no weight value is specified, the default value of 0 is used. When - /// multiple capacity providers are specified within a capacity provider strategy, at least one of the - /// capacity providers must have a weight value greater than zero and any capacity providers with a weight - /// of 0 can't be used to place tasks. If you specify multiple capacity providers in a - /// strategy that all have a weight of 0, any RunTask or - /// CreateService actions using the capacity provider strategy will fail. An example scenario for using weights is defining a strategy that contains two capacity providers and - /// both have a weight of 1, then when the base is satisfied, the tasks will be - /// split evenly across the two capacity providers. Using that same logic, if you specify a weight of - /// 1 for capacityProviderA and a weight of 4 for + /// The weight value designates the relative percentage of the total + /// number of tasks launched that should use the specified capacity provider. The + /// weight value is taken into consideration after the base + /// value, if defined, is satisfied. If no weight value is specified, the default value of 0 is + /// used. When multiple capacity providers are specified within a capacity provider + /// strategy, at least one of the capacity providers must have a weight value greater than + /// zero and any capacity providers with a weight of 0 can't be used to place + /// tasks. If you specify multiple capacity providers in a strategy that all have a weight + /// of 0, any RunTask or CreateService actions using + /// the capacity provider strategy will fail. 
An example scenario for using weights is defining a strategy that contains two + /// capacity providers and both have a weight of 1, then when the + /// base is satisfied, the tasks will be split evenly across the two + /// capacity providers. Using that same logic, if you specify a weight of 1 for + /// capacityProviderA and a weight of 4 for /// capacityProviderB, then for every one task that's run using /// capacityProviderA, four tasks would use - /// capacityProviderB. + /// capacityProviderB. public let weight: Int? @inlinable @@ -760,32 +768,33 @@ extension ECS { } public struct Cluster: AWSDecodableShape { - /// The number of services that are running on the cluster in an ACTIVE state. You can view - /// these services with PListServices. + /// The number of services that are running on the cluster in an ACTIVE + /// state. You can view these services with PListServices. public let activeServicesCount: Int? - /// The resources attached to a cluster. When using a capacity provider with a cluster, the capacity - /// provider and associated resources are returned as cluster attachments. + /// The resources attached to a cluster. When using a capacity provider with a cluster, + /// the capacity provider and associated resources are returned as cluster + /// attachments. public let attachments: [Attachment]? - /// The status of the capacity providers associated with the cluster. The following are the states that - /// are returned. UPDATE_IN_PROGRESS The available capacity providers for the cluster are updating. UPDATE_COMPLETE The capacity providers have successfully updated. UPDATE_FAILED The capacity provider updates failed. + /// The status of the capacity providers associated with the cluster. The following are + /// the states that are returned. UPDATE_IN_PROGRESS The available capacity providers for the cluster are updating. UPDATE_COMPLETE The capacity providers have successfully updated. UPDATE_FAILED The capacity provider updates failed. 
public let attachmentsStatus: String? /// The capacity providers associated with the cluster. public let capacityProviders: [String]? - /// The Amazon Resource Name (ARN) that identifies the cluster. For more information about the ARN format, see Amazon Resource Name (ARN) - /// in the Amazon ECS Developer Guide. + /// The Amazon Resource Name (ARN) that identifies the cluster. For more information about the ARN + /// format, see Amazon Resource Name (ARN) in the Amazon ECS Developer Guide. public let clusterArn: String? /// A user-generated string that you use to identify your cluster. public let clusterName: String? /// The execute command configuration for the cluster. public let configuration: ClusterConfiguration? - /// The default capacity provider strategy for the cluster. When services or tasks are run in the cluster - /// with no launch type or capacity provider strategy specified, the default capacity provider strategy is - /// used. + /// The default capacity provider strategy for the cluster. When services or tasks are run + /// in the cluster with no launch type or capacity provider strategy specified, the default + /// capacity provider strategy is used. public let defaultCapacityProviderStrategy: [CapacityProviderStrategyItem]? /// The number of tasks in the cluster that are in the PENDING state. public let pendingTasksCount: Int? - /// The number of container instances registered into the cluster. This includes container instances in - /// both ACTIVE and DRAINING status. + /// The number of container instances registered into the cluster. This includes container + /// instances in both ACTIVE and DRAINING status. public let registeredContainerInstancesCount: Int? /// The number of tasks in the cluster that are in the RUNNING state. public let runningTasksCount: Int? @@ -801,23 +810,24 @@ extension ECS { /// Only the tasks that Amazon ECS services create are supported with Service Connect. 
/// For more information, see Service Connect in the Amazon Elastic Container Service Developer Guide. public let serviceConnectDefaults: ClusterServiceConnectDefaults? - /// The settings for the cluster. This parameter indicates whether CloudWatch Container Insights is on or off - /// for a cluster. + /// The settings for the cluster. This parameter indicates whether CloudWatch Container Insights + /// is on or off for a cluster. public let settings: [ClusterSetting]? - /// Additional information about your clusters that are separated by launch type. They include the - /// following: runningEC2TasksCount RunningFargateTasksCount pendingEC2TasksCount pendingFargateTasksCount activeEC2ServiceCount activeFargateServiceCount drainingEC2ServiceCount drainingFargateServiceCount + /// Additional information about your clusters that are separated by launch type. They + /// include the following: runningEC2TasksCount RunningFargateTasksCount pendingEC2TasksCount pendingFargateTasksCount activeEC2ServiceCount activeFargateServiceCount drainingEC2ServiceCount drainingFargateServiceCount public let statistics: [KeyValuePair]? - /// The status of the cluster. The following are the possible states that are returned. ACTIVE The cluster is ready to accept tasks and if applicable you can register container - /// instances with the cluster. PROVISIONING The cluster has capacity providers that are associated with it and the resources needed - /// for the capacity provider are being created. DEPROVISIONING The cluster has capacity providers that are associated with it and the resources needed - /// for the capacity provider are being deleted. FAILED The cluster has capacity providers that are associated with it and the resources needed - /// for the capacity provider have failed to create. INACTIVE The cluster has been deleted. Clusters with an INACTIVE status may remain - /// discoverable in your account for a period of time. 
However, this behavior is subject to - /// change in the future. We don't recommend that you rely on INACTIVE clusters - /// persisting. + /// The status of the cluster. The following are the possible states that are + /// returned. ACTIVE The cluster is ready to accept tasks and if applicable you can register + /// container instances with the cluster. PROVISIONING The cluster has capacity providers that are associated with it and the + /// resources needed for the capacity provider are being created. DEPROVISIONING The cluster has capacity providers that are associated with it and the + /// resources needed for the capacity provider are being deleted. FAILED The cluster has capacity providers that are associated with it and the + /// resources needed for the capacity provider have failed to create. INACTIVE The cluster has been deleted. Clusters with an INACTIVE + /// status may remain discoverable in your account for a period of time. + /// However, this behavior is subject to change in the future. We don't + /// recommend that you rely on INACTIVE clusters persisting. public let status: String? - /// The metadata that you apply to the cluster to help you categorize and organize them. Each tag - /// consists of a key and an optional value. You define both. The following basic restrictions apply to tags: Maximum number of tags per resource - 50 For each resource, each tag key must be unique, and each tag key can have only one value. Maximum key length - 128 Unicode characters in UTF-8 Maximum value length - 256 Unicode characters in UTF-8 If your tagging schema is used across multiple services and resources, remember that other services may have restrictions on allowed characters. Generally allowed characters are: letters, numbers, and spaces representable in UTF-8, and the following characters: + - = . _ : / @. Tag keys and values are case-sensitive. 
Do not use aws:, AWS:, or any upper or lowercase combination of such as a prefix for either keys or values as it is reserved for Amazon Web Services use. You cannot edit or delete tag keys or values with this prefix. Tags with this prefix do not count against your tags per resource limit. + /// The metadata that you apply to the cluster to help you categorize and organize them. + /// Each tag consists of a key and an optional value. You define both. The following basic restrictions apply to tags: Maximum number of tags per resource - 50 For each resource, each tag key must be unique, and each tag key can have only one value. Maximum key length - 128 Unicode characters in UTF-8 Maximum value length - 256 Unicode characters in UTF-8 If your tagging schema is used across multiple services and resources, remember that other services may have restrictions on allowed characters. Generally allowed characters are: letters, numbers, and spaces representable in UTF-8, and the following characters: + - = . _ : / @. Tag keys and values are case-sensitive. Do not use aws:, AWS:, or any upper or lowercase combination of such as a prefix for either keys or values as it is reserved for Amazon Web Services use. You cannot edit or delete tag keys or values with this prefix. Tags with this prefix do not count against your tags per resource limit. public let tags: [Tag]? @inlinable @@ -879,8 +889,8 @@ extension ECS { } public struct ClusterServiceConnectDefaults: AWSDecodableShape { - /// The namespace name or full Amazon Resource Name (ARN) of the Cloud Map namespace. When you create a service and don't specify a Service Connect - /// configuration, this namespace is used. + /// The namespace name or full Amazon Resource Name (ARN) of the Cloud Map namespace. When you create a service and don't specify a + /// Service Connect configuration, this namespace is used. public let namespace: String? 
@inlinable @@ -894,16 +904,18 @@ extension ECS { } public struct ClusterServiceConnectDefaultsRequest: AWSEncodableShape { - /// The namespace name or full Amazon Resource Name (ARN) of the Cloud Map namespace that's used when you create a service and don't specify a - /// Service Connect configuration. The namespace name can include up to 1024 characters. The name is - /// case-sensitive. The name can't include hyphens (-), tilde (~), greater than (>), less than ( If you enter an existing namespace name or ARN, then that namespace will be used. Any namespace - /// type is supported. The namespace must be in this account and this Amazon Web Services Region. If you enter a new name, a Cloud Map namespace will be created. Amazon ECS creates a Cloud Map namespace - /// with the "API calls" method of instance discovery only. This instance discovery method is the "HTTP" - /// namespace type in the Command Line Interface. Other types of instance discovery aren't used by - /// Service Connect. If you update the cluster with an empty string "" for the namespace name, the cluster - /// configuration for Service Connect is removed. Note that the namespace will remain in Cloud Map and must - /// be deleted separately. For more information about Cloud Map, see Working with Services in the - /// Cloud Map Developer Guide. + /// The namespace name or full Amazon Resource Name (ARN) of the Cloud Map namespace that's used when you create a service and don't specify + /// a Service Connect configuration. The namespace name can include up to 1024 characters. + /// The name is case-sensitive. The name can't include hyphens (-), tilde (~), greater than + /// (>), less than ( If you enter an existing namespace name or ARN, then that namespace will be used. + /// Any namespace type is supported. The namespace must be in this account and this Amazon Web Services + /// Region. If you enter a new name, a Cloud Map namespace will be created. 
Amazon ECS creates a + /// Cloud Map namespace with the "API calls" method of instance discovery only. This instance + /// discovery method is the "HTTP" namespace type in the Command Line Interface. Other types of instance + /// discovery aren't used by Service Connect. If you update the cluster with an empty string "" for the namespace name, + /// the cluster configuration for Service Connect is removed. Note that the namespace will + /// remain in Cloud Map and must be deleted separately. For more information about Cloud Map, see Working with Services + /// in the Cloud Map Developer Guide. public let namespace: String @inlinable @@ -919,11 +931,11 @@ extension ECS { public struct ClusterSetting: AWSEncodableShape & AWSDecodableShape { /// The name of the cluster setting. The value is containerInsights . public let name: ClusterSettingName? - /// The value to set for the cluster setting. The supported values are enhanced, - /// enabled, and disabled. To use Container Insights with enhanced observability, set the + /// The value to set for the cluster setting. The supported values are + /// enhanced, enabled, and disabled. To use Container Insights with enhanced observability, set the /// containerInsights account setting to enhanced. To use Container Insights, set the containerInsights account setting to - /// enabled. If a cluster value is specified, it will override the containerInsights value - /// set with PutAccountSetting or PutAccountSettingDefault. + /// enabled. If a cluster value is specified, it will override the containerInsights + /// value set with PutAccountSetting or PutAccountSettingDefault. public let value: String? @inlinable @@ -941,15 +953,17 @@ extension ECS { public struct Container: AWSDecodableShape { /// The Amazon Resource Name (ARN) of the container. public let containerArn: String? - /// The number of CPU units set for the container. 
The value is 0 if no value was specified - /// in the container definition when the task definition was registered. + /// The number of CPU units set for the container. The value is 0 if no value + /// was specified in the container definition when the task definition was + /// registered. public let cpu: String? /// The exit code returned from the container. public let exitCode: Int? /// The IDs of each GPU assigned to the container. public let gpuIds: [String]? - /// The health status of the container. If health checks aren't configured for this container in its task - /// definition, then it reports the health status as UNKNOWN. + /// The health status of the container. If health checks aren't configured for this + /// container in its task definition, then it reports the health status as + /// UNKNOWN. public let healthStatus: HealthStatus? /// The image used for the container. public let image: String? @@ -969,8 +983,8 @@ extension ECS { public let networkBindings: [NetworkBinding]? /// The network interfaces associated with the container. public let networkInterfaces: [NetworkInterface]? - /// A short (255 max characters) human-readable string to provide additional details about a running or - /// stopped container. + /// A short (255 max characters) human-readable string to provide additional details about + /// a running or stopped container. public let reason: String? /// The ID of the Docker container. public let runtimeId: String? @@ -1020,300 +1034,344 @@ extension ECS { } public struct ContainerDefinition: AWSEncodableShape & AWSDecodableShape { - /// The command that's passed to the container. This parameter maps to Cmd in the docker - /// container create command and the COMMAND parameter to docker run. If there are multiple - /// arguments, each argument is a separated string in the array. + /// The command that's passed to the container. 
This parameter maps to Cmd in + /// the docker container create command and the COMMAND parameter to docker + /// run. If there are multiple arguments, each argument is a separated string in the + /// array. public let command: [String]? - /// The number of cpu units reserved for the container. This parameter maps to - /// CpuShares in the docker container create commandand the --cpu-shares - /// option to docker run. This field is optional for tasks using the Fargate launch type, and the only - /// requirement is that the total amount of CPU reserved for all containers within a task be lower than the - /// task-level cpu value. You can determine the number of CPU units that are available per EC2 instance type by multiplying - /// the vCPUs listed for that instance type on the Amazon EC2 Instances detail page by 1,024. Linux containers share unallocated CPU units with other containers on the container instance with the - /// same ratio as their allocated amount. For example, if you run a single-container task on a single-core - /// instance type with 512 CPU units specified for that container, and that's the only task running on the - /// container instance, that container could use the full 1,024 CPU unit share at any given time. However, - /// if you launched another copy of the same task on that container instance, each task is guaranteed a - /// minimum of 512 CPU units when needed. Moreover, each container could float to higher CPU usage if the - /// other container was not using it. If both tasks were 100% active all of the time, they would be limited - /// to 512 CPU units. On Linux container instances, the Docker daemon on the container instance uses the CPU value to - /// calculate the relative CPU share ratios for running containers. The minimum valid CPU share value that - /// the Linux kernel allows is 2, and the maximum valid CPU share value that the Linux kernel allows is - /// 262144. 
However, the CPU parameter isn't required, and you can use CPU values below 2 or above 262144 - /// in your container definitions. For CPU values below 2 (including null) or above 262144, the behavior - /// varies based on your Amazon ECS container agent version: Agent versions less than or equal to 1.1.0: Null and zero CPU - /// values are passed to Docker as 0, which Docker then converts to 1,024 CPU shares. CPU values of - /// 1 are passed to Docker as 1, which the Linux kernel converts to two CPU shares. Agent versions greater than or equal to 1.2.0: Null, zero, and - /// CPU values of 1 are passed to Docker as 2. Agent versions greater than or equal to 1.84.0: CPU values - /// greater than 256 vCPU are passed to Docker as 256, which is equivalent to 262144 CPU - /// shares. On Windows container instances, the CPU limit is enforced as an absolute limit, or a quota. Windows - /// containers only have access to the specified amount of CPU that's described in the task definition. A - /// null or zero CPU value is passed to Docker as 0, which Windows interprets as 1% of one - /// CPU. + /// The number of cpu units reserved for the container. This parameter maps + /// to CpuShares in the docker container create command and the + /// --cpu-shares option to docker run. This field is optional for tasks using the Fargate launch type, and the + /// only requirement is that the total amount of CPU reserved for all containers within a + /// task be lower than the task-level cpu value. You can determine the number of CPU units that are available per EC2 instance type + /// by multiplying the vCPUs listed for that instance type on the Amazon EC2 Instances detail page + /// by 1,024. Linux containers share unallocated CPU units with other containers on the container + /// instance with the same ratio as their allocated amount. 
For example, if you run a + /// single-container task on a single-core instance type with 512 CPU units specified for + /// that container, and that's the only task running on the container instance, that + /// container could use the full 1,024 CPU unit share at any given time. However, if you + /// launched another copy of the same task on that container instance, each task is + /// guaranteed a minimum of 512 CPU units when needed. Moreover, each container could float + /// to higher CPU usage if the other container was not using it. If both tasks were 100% + /// active all of the time, they would be limited to 512 CPU units. On Linux container instances, the Docker daemon on the container instance uses the CPU + /// value to calculate the relative CPU share ratios for running containers. The minimum + /// valid CPU share value that the Linux kernel allows is 2, and the maximum valid CPU share + /// value that the Linux kernel allows is 262144. However, the CPU parameter isn't required, + /// and you can use CPU values below 2 or above 262144 in your container definitions. For + /// CPU values below 2 (including null) or above 262144, the behavior varies based on your + /// Amazon ECS container agent version: Agent versions less than or equal to 1.1.0: + /// Null and zero CPU values are passed to Docker as 0, which Docker then converts + /// to 1,024 CPU shares. CPU values of 1 are passed to Docker as 1, which the Linux + /// kernel converts to two CPU shares. Agent versions greater than or equal to 1.2.0: + /// Null, zero, and CPU values of 1 are passed to Docker as 2. Agent versions greater than or equal to + /// 1.84.0: CPU values greater than 256 vCPU are passed to Docker as + /// 256, which is equivalent to 262144 CPU shares. On Windows container instances, the CPU limit is enforced as an absolute limit, or a + /// quota. Windows containers only have access to the specified amount of CPU that's + /// described in the task definition. 
A null or zero CPU value is passed to Docker as + /// 0, which Windows interprets as 1% of one CPU. public let cpu: Int? - /// A list of ARNs in SSM or Amazon S3 to a credential spec (CredSpec) file that configures the - /// container for Active Directory authentication. We recommend that you use this parameter instead of the - /// dockerSecurityOptions. The maximum number of ARNs is 1. There are two formats for each ARN. credentialspecdomainless:MyARN You use credentialspecdomainless:MyARN to provide a CredSpec - /// with an additional section for a secret in Secrets Manager. You provide the login credentials to the - /// domain in the secret. Each task that runs on any container instance can join different domains. You can use this format without joining the container instance to a domain. credentialspec:MyARN You use credentialspec:MyARN to provide a CredSpec for a single - /// domain. You must join the container instance to the domain before you start any tasks that use - /// this task definition. In both formats, replace MyARN with the ARN in SSM or Amazon S3. If you provide a credentialspecdomainless:MyARN, the credspec must provide - /// a ARN in Secrets Manager for a secret containing the username, password, and the domain to connect to. For better - /// security, the instance isn't joined to the domain for domainless authentication. Other applications on - /// the instance can't use the domainless credentials. You can use this parameter to run tasks on the same - /// instance, even it the tasks need to join different domains. For more information, see Using gMSAs for - /// Windows Containers and Using gMSAs for Linux - /// Containers. + /// A list of ARNs in SSM or Amazon S3 to a credential spec (CredSpec) file that + /// configures the container for Active Directory authentication. We recommend that you use + /// this parameter instead of the dockerSecurityOptions. The maximum number of + /// ARNs is 1. There are two formats for each ARN. 
credentialspecdomainless:MyARN You use credentialspecdomainless:MyARN to provide a + /// CredSpec with an additional section for a secret in Secrets Manager. + /// You provide the login credentials to the domain in the secret. Each task that runs on any container instance can join different + /// domains. You can use this format without joining the container instance to a + /// domain. credentialspec:MyARN You use credentialspec:MyARN to provide a + /// CredSpec for a single domain. You must join the container instance to the domain before you start any + /// tasks that use this task definition. In both formats, replace MyARN with the ARN in SSM or Amazon S3. If you provide a credentialspecdomainless:MyARN, the + /// credspec must provide an ARN in Secrets Manager for a secret containing the + /// username, password, and the domain to connect to. For better security, the instance + /// isn't joined to the domain for domainless authentication. Other applications on the + /// instance can't use the domainless credentials. You can use this parameter to run tasks + /// on the same instance, even if the tasks need to join different domains. For more + /// information, see Using gMSAs for Windows + /// Containers and Using gMSAs for Linux + /// Containers. public let credentialSpecs: [String]? - /// The dependencies defined for container startup and shutdown. A container can contain multiple - /// dependencies on other containers in a task definition. When a dependency is defined for container - /// startup, for container shutdown it is reversed. For tasks using the EC2 launch type, the container instances require at least version - /// 1.26.0 of the container agent to turn on container dependencies. However, we recommend using the latest - /// container agent version. For information about checking your agent version and updating to the latest - /// version, see Updating the Amazon ECS Container Agent in the Amazon Elastic Container Service Developer Guide.
If you're using an - /// Amazon ECS-optimized Linux AMI, your instance needs at least version 1.26.0-1 of the ecs-init package. If - /// your container instances are launched from version 20190301 or later, then they contain - /// the required versions of the container agent and ecs-init. For more information, see - /// Amazon ECS-optimized Linux AMI in the Amazon Elastic Container Service Developer Guide. For tasks using the Fargate launch type, the task or service requires the following - /// platforms: Linux platform version 1.3.0 or later. Windows platform version 1.0.0 or later. + /// The dependencies defined for container startup and shutdown. A container can contain + /// multiple dependencies on other containers in a task definition. When a dependency is + /// defined for container startup, for container shutdown it is reversed. For tasks using the EC2 launch type, the container instances require at + /// least version 1.26.0 of the container agent to turn on container dependencies. However, + /// we recommend using the latest container agent version. For information about checking + /// your agent version and updating to the latest version, see Updating the Amazon ECS + /// Container Agent in the Amazon Elastic Container Service Developer Guide. If you're using an Amazon ECS-optimized Linux AMI, + /// your instance needs at least version 1.26.0-1 of the ecs-init package. If + /// your container instances are launched from version 20190301 or later, then + /// they contain the required versions of the container agent and ecs-init. For + /// more information, see Amazon ECS-optimized Linux AMI + /// in the Amazon Elastic Container Service Developer Guide. For tasks using the Fargate launch type, the task or service requires + /// the following platforms: Linux platform version 1.3.0 or later. Windows platform version 1.0.0 or later. public let dependsOn: [ContainerDependency]? - /// When this parameter is true, networking is off within the container. 
This parameter maps to - /// NetworkDisabled in the docker container create command. This parameter is not supported for Windows containers. + /// When this parameter is true, networking is off within the container. This parameter + /// maps to NetworkDisabled in the docker container create command. This parameter is not supported for Windows containers. public let disableNetworking: Bool? - /// A list of DNS search domains that are presented to the container. This parameter maps to - /// DnsSearch in the docker container create command and the --dns-search - /// option to docker run. This parameter is not supported for Windows containers. + /// A list of DNS search domains that are presented to the container. This parameter maps + /// to DnsSearch in the docker container create command and the + /// --dns-search option to docker run. This parameter is not supported for Windows containers. public let dnsSearchDomains: [String]? - /// A list of DNS servers that are presented to the container. This parameter maps to Dns in - /// the docker container create command and the --dns option to docker run. This parameter is not supported for Windows containers. + /// A list of DNS servers that are presented to the container. This parameter maps to + /// Dns in the docker container create command and the --dns + /// option to docker run. This parameter is not supported for Windows containers. public let dnsServers: [String]? - /// A key/value map of labels to add to the container. This parameter maps to Labels in the - /// docker container create command and the --label option to docker run. - /// This parameter requires version 1.18 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: sudo docker version --format '{{.Server.APIVersion}}' + /// A key/value map of labels to add to the container. 
This parameter maps to + /// Labels in the docker container create command and the + /// --label option to docker run. This parameter requires version 1.18 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: sudo docker version --format '{{.Server.APIVersion}}' public let dockerLabels: [String: String]? - /// A list of strings to provide custom configuration for multiple security systems. This field isn't - /// valid for containers in tasks using the Fargate launch type. For Linux tasks on EC2, this parameter can be used to reference custom labels for - /// SELinux and AppArmor multi-level security systems. For any tasks on EC2, this parameter can be used to reference a credential spec file - /// that configures a container for Active Directory authentication. For more information, see Using gMSAs for - /// Windows Containers and Using gMSAs for Linux Containers in - /// the Amazon Elastic Container Service Developer Guide. This parameter maps to SecurityOpt in the docker container create command and the - /// --security-opt option to docker run. The Amazon ECS container agent running on a container instance must register with the - /// ECS_SELINUX_CAPABLE=true or ECS_APPARMOR_CAPABLE=true environment - /// variables before containers placed on that instance can use these security options. For more - /// information, see Amazon ECS Container Agent - /// Configuration in the Amazon Elastic Container Service Developer Guide. Valid values: "no-new-privileges" | "apparmor:PROFILE" | "label:value" | + /// A list of strings to provide custom configuration for multiple security systems. This + /// field isn't valid for containers in tasks using the Fargate launch + /// type. For Linux tasks on EC2, this parameter can be used to reference custom + /// labels for SELinux and AppArmor multi-level security systems. 
For any tasks on EC2, this parameter can be used to reference a + /// credential spec file that configures a container for Active Directory authentication. + /// For more information, see Using gMSAs for Windows + /// Containers and Using gMSAs for Linux + /// Containers in the Amazon Elastic Container Service Developer Guide. This parameter maps to SecurityOpt in the docker container create command + /// and the --security-opt option to docker run. The Amazon ECS container agent running on a container instance must register with the + /// ECS_SELINUX_CAPABLE=true or ECS_APPARMOR_CAPABLE=true + /// environment variables before containers placed on that instance can use these + /// security options. For more information, see Amazon ECS Container + /// Agent Configuration in the Amazon Elastic Container Service Developer Guide. Valid values: "no-new-privileges" | "apparmor:PROFILE" | "label:value" | /// "credentialspec:CredentialSpecFilePath" public let dockerSecurityOptions: [String]? - /// Early versions of the Amazon ECS container agent don't properly handle entryPoint - /// parameters. If you have problems using entryPoint, update your container agent or - /// enter your commands and arguments as command array items instead. The entry point that's passed to the container. This parameter maps to Entrypoint in the - /// docker container create command and the --entrypoint option to docker run. + /// Early versions of the Amazon ECS container agent don't properly handle + /// entryPoint parameters. If you have problems using + /// entryPoint, update your container agent or enter your commands and + /// arguments as command array items instead. The entry point that's passed to the container. This parameter maps to + /// Entrypoint in the docker container create command and the + /// --entrypoint option to docker run. public let entryPoint: [String]? - /// The environment variables to pass to a container. 
This parameter maps to Env in the - /// docker container create command and the --env option to docker run. We don't recommend that you use plaintext environment variables for sensitive information, such - /// as credential data. + /// The environment variables to pass to a container. This parameter maps to + /// Env in the docker container create command and the --env + /// option to docker run. We don't recommend that you use plaintext environment variables for sensitive + /// information, such as credential data. public let environment: [KeyValuePair]? - /// A list of files containing the environment variables to pass to a container. This parameter maps to - /// the --env-file option to docker run. You can specify up to ten environment files. The file must have a .env file extension. - /// Each line in an environment file contains an environment variable in VARIABLE=VALUE - /// format. Lines beginning with # are treated as comments and are ignored. If there are environment variables specified using the environment parameter in a - /// container definition, they take precedence over the variables contained within an environment file. If - /// multiple environment files are specified that contain the same variable, they're processed from the top - /// down. We recommend that you use unique variable names. For more information, see Specifying - /// Environment Variables in the Amazon Elastic Container Service Developer Guide. + /// A list of files containing the environment variables to pass to a container. This + /// parameter maps to the --env-file option to docker run. You can specify up to ten environment files. The file must have a .env + /// file extension. Each line in an environment file contains an environment variable in + /// VARIABLE=VALUE format. Lines beginning with # are treated + /// as comments and are ignored. 
If there are environment variables specified using the environment + /// parameter in a container definition, they take precedence over the variables contained + /// within an environment file. If multiple environment files are specified that contain the + /// same variable, they're processed from the top down. We recommend that you use unique + /// variable names. For more information, see Specifying Environment + /// Variables in the Amazon Elastic Container Service Developer Guide. public let environmentFiles: [EnvironmentFile]? - /// If the essential parameter of a container is marked as true, and that - /// container fails or stops for any reason, all other containers that are part of the task are stopped. If - /// the essential parameter of a container is marked as false, its failure - /// doesn't affect the rest of the containers in a task. If this parameter is omitted, a container is - /// assumed to be essential. All tasks must have at least one essential container. If you have an application that's composed of - /// multiple containers, group containers that are used for a common purpose into components, and separate - /// the different components into multiple task definitions. For more information, see Application Architecture in the Amazon Elastic Container Service Developer Guide. + /// If the essential parameter of a container is marked as true, + /// and that container fails or stops for any reason, all other containers that are part of + /// the task are stopped. If the essential parameter of a container is marked + /// as false, its failure doesn't affect the rest of the containers in a task. + /// If this parameter is omitted, a container is assumed to be essential. All tasks must have at least one essential container. If you have an application + /// that's composed of multiple containers, group containers that are used for a common + /// purpose into components, and separate the different components into multiple task + /// definitions. 
For more information, see Application + /// Architecture in the Amazon Elastic Container Service Developer Guide. public let essential: Bool? - /// A list of hostnames and IP address mappings to append to the /etc/hosts file on the - /// container. This parameter maps to ExtraHosts in the docker container create command and - /// the --add-host option to docker run. This parameter isn't supported for Windows containers or tasks that use the awsvpc - /// network mode. + /// A list of hostnames and IP address mappings to append to the /etc/hosts + /// file on the container. This parameter maps to ExtraHosts in the docker + /// container create command and the --add-host option to docker run. This parameter isn't supported for Windows containers or tasks that use the + /// awsvpc network mode. public let extraHosts: [HostEntry]? - /// The FireLens configuration for the container. This is used to specify and configure a log router for - /// container logs. For more information, see Custom Log Routing in the - /// Amazon Elastic Container Service Developer Guide. + /// The FireLens configuration for the container. This is used to specify and configure a + /// log router for container logs. For more information, see Custom Log Routing + /// in the Amazon Elastic Container Service Developer Guide. public let firelensConfiguration: FirelensConfiguration? - /// The container health check command and associated configuration parameters for the container. This - /// parameter maps to HealthCheck in the docker container create command and the - /// HEALTHCHECK parameter of docker run. + /// The container health check command and associated configuration parameters for the + /// container. This parameter maps to HealthCheck in the docker container + /// create command and the HEALTHCHECK parameter of docker run. public let healthCheck: HealthCheck? - /// The hostname to use for your container. 
This parameter maps to Hostname in the docker - /// container create command and the --hostname option to docker run. The hostname parameter is not supported if you're using the awsvpc - /// network mode. + /// The hostname to use for your container. This parameter maps to Hostname + /// in the docker container create command and the --hostname option to docker + /// run. The hostname parameter is not supported if you're using the + /// awsvpc network mode. public let hostname: String? - /// The image used to start a container. This string is passed directly to the Docker daemon. By default, - /// images in the Docker Hub registry are available. Other repositories are specified with either repository-url/image:tag or repository-url/image@digest . Up to 255 letters (uppercase and lowercase), numbers, hyphens, underscores, colons, periods, forward slashes, and number signs are allowed. This parameter maps to Image in the docker container create - /// command and the IMAGE parameter of docker run. When a new task starts, the Amazon ECS container agent pulls the latest version of the specified - /// image and tag for the container to use. However, subsequent updates to a repository image - /// aren't propagated to already running tasks. Images in Amazon ECR repositories can be specified by either using the full - /// registry/repository:tag or registry/repository@digest. For - /// example, + /// The image used to start a container. This string is passed directly to the Docker + /// daemon. By default, images in the Docker Hub registry are available. Other repositories + /// are specified with either repository-url/image:tag or repository-url/image@digest . Up to 255 letters (uppercase and lowercase), numbers, hyphens, underscores, colons, periods, forward slashes, and number signs are allowed. This parameter maps to Image in the docker + /// container create command and the IMAGE parameter of docker run. 
When a new task starts, the Amazon ECS container agent pulls the latest version of + /// the specified image and tag for the container to use. However, subsequent + /// updates to a repository image aren't propagated to already running tasks. Images in Amazon ECR repositories can be specified by either using the full + /// registry/repository:tag or + /// registry/repository@digest. For example, /// 012345678910.dkr.ecr..amazonaws.com/:latest /// or /// 012345678910.dkr.ecr..amazonaws.com/@sha256:94afd1f2e64d908bc90dbca0035a5b567EXAMPLE. /// Images in official repositories on Docker Hub use a single name (for example, - /// ubuntu or mongo). Images in other repositories on Docker Hub are qualified with an organization name (for - /// example, amazon/amazon-ecs-agent). Images in other online repositories are qualified further by a domain name (for example, - /// quay.io/assemblyline/ubuntu). + /// ubuntu or mongo). Images in other repositories on Docker Hub are qualified with an organization + /// name (for example, amazon/amazon-ecs-agent). Images in other online repositories are qualified further by a domain name + /// (for example, quay.io/assemblyline/ubuntu). public let image: String? - /// When this parameter is true, you can deploy containerized applications that require - /// stdin or a tty to be allocated. This parameter maps to - /// OpenStdin in the docker container create command and the --interactive - /// option to docker run. + /// When this parameter is true, you can deploy containerized applications + /// that require stdin or a tty to be allocated. This parameter + /// maps to OpenStdin in the docker container create command and the + /// --interactive option to docker run. public let interactive: Bool? - /// The links parameter allows containers to communicate with each other without the need - /// for port mappings. This parameter is only supported if the network mode of a task definition is - /// bridge. 
The name:internalName construct is analogous to - /// name:alias in Docker links. Up to 255 letters (uppercase and lowercase), numbers, underscores, and hyphens are allowed.. This parameter maps to - /// Links in the docker container create command and the --link option to - /// docker run. This parameter is not supported for Windows containers. Containers that are collocated on a single container instance may be able to communicate with - /// each other without requiring links or host port mappings. Network isolation is achieved on the - /// container instance using security groups and VPC settings. + /// The links parameter allows containers to communicate with each other + /// without the need for port mappings. This parameter is only supported if the network mode + /// of a task definition is bridge. The name:internalName + /// construct is analogous to name:alias in Docker links. + /// Up to 255 letters (uppercase and lowercase), numbers, underscores, and hyphens are allowed.. This parameter maps to Links in the docker + /// container create command and the --link option to docker run. This parameter is not supported for Windows containers. Containers that are collocated on a single container instance may be able to + /// communicate with each other without requiring links or host port mappings. Network + /// isolation is achieved on the container instance using security groups and VPC + /// settings. public let links: [String]? - /// Linux-specific modifications that are applied to the container, such as Linux kernel capabilities. - /// For more information see KernelCapabilities. This parameter is not supported for Windows containers. + /// Linux-specific modifications that are applied to the container, such as Linux kernel + /// capabilities. For more information see KernelCapabilities. This parameter is not supported for Windows containers. public let linuxParameters: LinuxParameters? - /// The log configuration specification for the container. 
This parameter maps to LogConfig in the docker container create command and the - /// --log-driver option to docker run. By default, containers use the same logging driver - /// that the Docker daemon uses. However the container can use a different logging driver than the Docker - /// daemon by specifying a log driver with this parameter in the container definition. To use a different - /// logging driver for a container, the log system must be configured properly on the container instance - /// (or on a different log server for remote logging options). Amazon ECS currently supports a subset of the logging drivers available to the Docker daemon (shown in - /// the LogConfiguration data type). Additional log drivers may be available in future - /// releases of the Amazon ECS container agent. This parameter requires version 1.18 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: sudo docker version --format '{{.Server.APIVersion}}' The Amazon ECS container agent running on a container instance must register the logging drivers - /// available on that instance with the ECS_AVAILABLE_LOGGING_DRIVERS environment variable - /// before containers placed on that instance can use these log configuration options. For more - /// information, see Amazon ECS Container Agent - /// Configuration in the Amazon Elastic Container Service Developer Guide. + /// The log configuration specification for the container. This parameter maps to LogConfig in the docker container create command + /// and the --log-driver option to docker run. By default, containers use the + /// same logging driver that the Docker daemon uses. However the container can use a + /// different logging driver than the Docker daemon by specifying a log driver with this + /// parameter in the container definition. 
To use a different logging driver for a + /// container, the log system must be configured properly on the container instance (or on a + /// different log server for remote logging options). Amazon ECS currently supports a subset of the logging drivers available to the Docker + /// daemon (shown in the LogConfiguration data type). Additional log drivers may be available in + /// future releases of the Amazon ECS container agent. This parameter requires version 1.18 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: sudo docker version --format '{{.Server.APIVersion}}' The Amazon ECS container agent running on a container instance must register the + /// logging drivers available on that instance with the + /// ECS_AVAILABLE_LOGGING_DRIVERS environment variable before + /// containers placed on that instance can use these log configuration options. For more + /// information, see Amazon ECS Container + /// Agent Configuration in the Amazon Elastic Container Service Developer Guide. public let logConfiguration: LogConfiguration? - /// The amount (in MiB) of memory to present to the container. If your container attempts to exceed the - /// memory specified here, the container is killed. The total amount of memory reserved for all containers - /// within a task must be lower than the task memory value, if one is specified. This - /// parameter maps to Memory in the docker container create command and the - /// --memory option to docker run. If using the Fargate launch type, this parameter is optional. If using the EC2 launch type, you must specify either a task-level memory value or a - /// container-level memory value. If you specify both a container-level memory and - /// memoryReservation value, memory must be greater than - /// memoryReservation. 
If you specify memoryReservation, then that value is - /// subtracted from the available memory resources for the container instance where the container is - /// placed. Otherwise, the value of memory is used. The Docker 20.10.0 or later daemon reserves a minimum of 6 MiB of memory for a container. So, don't - /// specify less than 6 MiB of memory for your containers. The Docker 19.03.13-ce or earlier daemon reserves a minimum of 4 MiB of memory for a container. So, - /// don't specify less than 4 MiB of memory for your containers. + /// The amount (in MiB) of memory to present to the container. If your container attempts + /// to exceed the memory specified here, the container is killed. The total amount of memory + /// reserved for all containers within a task must be lower than the task + /// memory value, if one is specified. This parameter maps to + /// Memory in the docker container create command and the + /// --memory option to docker run. If using the Fargate launch type, this parameter is optional. If using the EC2 launch type, you must specify either a task-level + /// memory value or a container-level memory value. If you specify both a container-level + /// memory and memoryReservation value, memory + /// must be greater than memoryReservation. If you specify + /// memoryReservation, then that value is subtracted from the available + /// memory resources for the container instance where the container is placed. Otherwise, + /// the value of memory is used. The Docker 20.10.0 or later daemon reserves a minimum of 6 MiB of memory for a + /// container. So, don't specify less than 6 MiB of memory for your containers. The Docker 19.03.13-ce or earlier daemon reserves a minimum of 4 MiB of memory for a + /// container. So, don't specify less than 4 MiB of memory for your containers. public let memory: Int? - /// The soft limit (in MiB) of memory to reserve for the container. 
When system memory is under heavy - /// contention, Docker attempts to keep the container memory to this soft limit. However, your container - /// can consume more memory when it needs to, up to either the hard limit specified with the - /// memory parameter (if applicable), or all of the available memory on the container - /// instance, whichever comes first. This parameter maps to MemoryReservation in the docker - /// container create command and the --memory-reservation option to docker run. If a task-level memory value is not specified, you must specify a non-zero integer for one or both of - /// memory or memoryReservation in a container definition. If you specify - /// both, memory must be greater than memoryReservation. If you specify - /// memoryReservation, then that value is subtracted from the available memory resources - /// for the container instance where the container is placed. Otherwise, the value of memory - /// is used. For example, if your container normally uses 128 MiB of memory, but occasionally bursts to 256 MiB of - /// memory for short periods of time, you can set a memoryReservation of 128 MiB, and a - /// memory hard limit of 300 MiB. This configuration would allow the container to only - /// reserve 128 MiB of memory from the remaining resources on the container instance, but also allow the - /// container to consume more memory resources when needed. The Docker 20.10.0 or later daemon reserves a minimum of 6 MiB of memory for a container. So, don't - /// specify less than 6 MiB of memory for your containers. The Docker 19.03.13-ce or earlier daemon reserves a minimum of 4 MiB of memory for a container. So, - /// don't specify less than 4 MiB of memory for your containers. + /// The soft limit (in MiB) of memory to reserve for the container. When system memory is + /// under heavy contention, Docker attempts to keep the container memory to this soft limit. 
+ /// However, your container can consume more memory when it needs to, up to either the hard + /// limit specified with the memory parameter (if applicable), or all of the + /// available memory on the container instance, whichever comes first. This parameter maps + /// to MemoryReservation in the docker container create command and the + /// --memory-reservation option to docker run. If a task-level memory value is not specified, you must specify a non-zero integer for + /// one or both of memory or memoryReservation in a container + /// definition. If you specify both, memory must be greater than + /// memoryReservation. If you specify memoryReservation, then + /// that value is subtracted from the available memory resources for the container instance + /// where the container is placed. Otherwise, the value of memory is + /// used. For example, if your container normally uses 128 MiB of memory, but occasionally + /// bursts to 256 MiB of memory for short periods of time, you can set a + /// memoryReservation of 128 MiB, and a memory hard limit of + /// 300 MiB. This configuration would allow the container to only reserve 128 MiB of memory + /// from the remaining resources on the container instance, but also allow the container to + /// consume more memory resources when needed. The Docker 20.10.0 or later daemon reserves a minimum of 6 MiB of memory for a + /// container. So, don't specify less than 6 MiB of memory for your containers. The Docker 19.03.13-ce or earlier daemon reserves a minimum of 4 MiB of memory for a + /// container. So, don't specify less than 4 MiB of memory for your containers. public let memoryReservation: Int? - /// The mount points for data volumes in your container. This parameter maps to Volumes in the docker container create command and the - /// --volume option to docker run. Windows containers can mount whole directories on the same drive as $env:ProgramData. 
- /// Windows containers can't mount directories on a different drive, and mount point can't be across - /// drives. + /// The mount points for data volumes in your container. This parameter maps to Volumes in the docker container create command and + /// the --volume option to docker run. Windows containers can mount whole directories on the same drive as + /// $env:ProgramData. Windows containers can't mount directories on a + /// different drive, and mount point can't be across drives. public let mountPoints: [MountPoint]? - /// The name of a container. If you're linking multiple containers together in a task definition, the - /// name of one container can be entered in the links of another container to - /// connect the containers. Up to 255 letters (uppercase and lowercase), numbers, underscores, and hyphens are allowed. This parameter maps to name in the docker - /// container create command and the --name option to docker run. + /// The name of a container. If you're linking multiple containers together in a task + /// definition, the name of one container can be entered in the + /// links of another container to connect the containers. + /// Up to 255 letters (uppercase and lowercase), numbers, underscores, and hyphens are allowed. This parameter maps to name in the docker container + /// create command and the --name option to docker run. public let name: String? - /// The list of port mappings for the container. Port mappings allow containers to access ports on the - /// host container instance to send or receive traffic. For task definitions that use the awsvpc network mode, only specify the - /// containerPort. The hostPort can be left blank or it must be the same - /// value as the containerPort. Port mappings on Windows use the NetNAT gateway address rather than - /// localhost. There's no loopback for port mappings on Windows, so you can't access a - /// container's mapped port from the host itself. 
This parameter maps to PortBindings in the the docker container create command and the - /// --publish option to docker run. If the network mode of a task definition is set to - /// none, then you can't specify port mappings. If the network mode of a task definition - /// is set to host, then host ports must either be undefined or they must match the container - /// port in the port mapping. After a task reaches the RUNNING status, manual and automatic host and container - /// port assignments are visible in the Network Bindings section of a - /// container description for a selected task in the Amazon ECS console. The assignments are also visible in - /// the networkBindings section DescribeTasks + /// The list of port mappings for the container. Port mappings allow containers to access + /// ports on the host container instance to send or receive traffic. For task definitions that use the awsvpc network mode, only specify the + /// containerPort. The hostPort can be left blank or it must + /// be the same value as the containerPort. Port mappings on Windows use the NetNAT gateway address rather than + /// localhost. There's no loopback for port mappings on Windows, so you + /// can't access a container's mapped port from the host itself. This parameter maps to PortBindings in the the docker container create + /// command and the --publish option to docker run. If the network mode of a + /// task definition is set to none, then you can't specify port mappings. If + /// the network mode of a task definition is set to host, then host ports must + /// either be undefined or they must match the container port in the port mapping. After a task reaches the RUNNING status, manual and automatic host + /// and container port assignments are visible in the Network + /// Bindings section of a container description for a selected task in + /// the Amazon ECS console. The assignments are also visible in the + /// networkBindings section DescribeTasks /// responses. 
public let portMappings: [PortMapping]? - /// When this parameter is true, the container is given elevated privileges on the host container - /// instance (similar to the root user). This parameter maps to Privileged in the - /// docker container create command and the --privileged option to docker run This parameter is not supported for Windows containers or tasks run on Fargate. + /// When this parameter is true, the container is given elevated privileges on the host + /// container instance (similar to the root user). This parameter maps to + /// Privileged in the docker container create command and the + /// --privileged option to docker run This parameter is not supported for Windows containers or tasks run on Fargate. public let privileged: Bool? - /// When this parameter is true, a TTY is allocated. This parameter maps to Tty - /// in the docker container create command and the --tty option to docker run. + /// When this parameter is true, a TTY is allocated. This parameter maps to + /// Tty in the docker container create command and the --tty + /// option to docker run. public let pseudoTerminal: Bool? - /// When this parameter is true, the container is given read-only access to its root file system. This - /// parameter maps to ReadonlyRootfs in the docker container create command and the - /// --read-only option to docker run. This parameter is not supported for Windows containers. + /// When this parameter is true, the container is given read-only access to its root file + /// system. This parameter maps to ReadonlyRootfs in the docker container + /// create command and the --read-only option to docker run. This parameter is not supported for Windows containers. public let readonlyRootFilesystem: Bool? /// The private repository authentication credentials to use. public let repositoryCredentials: RepositoryCredentials? - /// The type and amount of a resource to assign to a container. The only supported resource is a - /// GPU. 
+ /// The type and amount of a resource to assign to a container. The only supported + /// resource is a GPU. public let resourceRequirements: [ResourceRequirement]? - /// The restart policy for a container. When you set up a restart policy, Amazon ECS can restart the container - /// without needing to replace the task. For more information, see Restart individual containers - /// in Amazon ECS tasks with container restart policies in the Amazon Elastic Container Service Developer Guide. + /// The restart policy for a container. When you set up a restart policy, Amazon ECS can + /// restart the container without needing to replace the task. For more information, see + /// Restart + /// individual containers in Amazon ECS tasks with container restart policies in the + /// Amazon Elastic Container Service Developer Guide. public let restartPolicy: ContainerRestartPolicy? - /// The secrets to pass to the container. For more information, see Specifying Sensitive - /// Data in the Amazon Elastic Container Service Developer Guide. + /// The secrets to pass to the container. For more information, see Specifying + /// Sensitive Data in the Amazon Elastic Container Service Developer Guide. public let secrets: [Secret]? - /// Time duration (in seconds) to wait before giving up on resolving dependencies for a container. For - /// example, you specify two containers in a task definition with containerA having a dependency on - /// containerB reaching a COMPLETE, SUCCESS, or HEALTHY status. If a - /// startTimeout value is specified for containerB and it doesn't reach the desired status - /// within that time then containerA gives up and not start. This results in the task transitioning to a - /// STOPPED state. When the ECS_CONTAINER_START_TIMEOUT container agent configuration variable is used, - /// it's enforced independently from this start timeout value. 
For tasks using the Fargate launch type, the task or service requires the following - /// platforms: Linux platform version 1.3.0 or later. Windows platform version 1.0.0 or later. For tasks using the EC2 launch type, your container instances require at least version - /// 1.26.0 of the container agent to use a container start timeout value. However, we - /// recommend using the latest container agent version. For information about checking your agent version - /// and updating to the latest version, see Updating the Amazon ECS Container - /// Agent in the Amazon Elastic Container Service Developer Guide. If you're using an Amazon ECS-optimized Linux AMI, your instance needs at - /// least version 1.26.0-1 of the ecs-init package. If your container instances - /// are launched from version 20190301 or later, then they contain the required versions of - /// the container agent and ecs-init. For more information, see Amazon ECS-optimized Linux AMI in the - /// Amazon Elastic Container Service Developer Guide. The valid values for Fargate are 2-120 seconds. + /// Time duration (in seconds) to wait before giving up on resolving dependencies for a + /// container. For example, you specify two containers in a task definition with containerA + /// having a dependency on containerB reaching a COMPLETE, + /// SUCCESS, or HEALTHY status. If a startTimeout + /// value is specified for containerB and it doesn't reach the desired status within that + /// time then containerA gives up and not start. This results in the task transitioning to a + /// STOPPED state. When the ECS_CONTAINER_START_TIMEOUT container agent configuration + /// variable is used, it's enforced independently from this start timeout value. For tasks using the Fargate launch type, the task or service requires + /// the following platforms: Linux platform version 1.3.0 or later. Windows platform version 1.0.0 or later. 
For tasks using the EC2 launch type, your container instances require at + /// least version 1.26.0 of the container agent to use a container start + /// timeout value. However, we recommend using the latest container agent version. For + /// information about checking your agent version and updating to the latest version, see + /// Updating the Amazon ECS + /// Container Agent in the Amazon Elastic Container Service Developer Guide. If you're using an Amazon ECS-optimized Linux AMI, + /// your instance needs at least version 1.26.0-1 of the ecs-init + /// package. If your container instances are launched from version 20190301 or + /// later, then they contain the required versions of the container agent and + /// ecs-init. For more information, see Amazon ECS-optimized Linux AMI + /// in the Amazon Elastic Container Service Developer Guide. The valid values for Fargate are 2-120 seconds. public let startTimeout: Int? - /// Time duration (in seconds) to wait before the container is forcefully killed if it doesn't exit - /// normally on its own. For tasks using the Fargate launch type, the task or service requires the following - /// platforms: Linux platform version 1.3.0 or later. Windows platform version 1.0.0 or later. For tasks that use the Fargate launch type, the max stop timeout value is 120 seconds and if the - /// parameter is not specified, the default value of 30 seconds is used. For tasks that use the EC2 launch type, if the stopTimeout parameter isn't - /// specified, the value set for the Amazon ECS container agent configuration variable - /// ECS_CONTAINER_STOP_TIMEOUT is used. If neither the stopTimeout parameter - /// or the ECS_CONTAINER_STOP_TIMEOUT agent configuration variable are set, then the default - /// values of 30 seconds for Linux containers and 30 seconds on Windows containers are used. Your container - /// instances require at least version 1.26.0 of the container agent to use a container stop timeout value. 
- /// However, we recommend using the latest container agent version. For information about checking your - /// agent version and updating to the latest version, see Updating the Amazon ECS Container - /// Agent in the Amazon Elastic Container Service Developer Guide. If you're using an Amazon ECS-optimized Linux AMI, your instance needs at - /// least version 1.26.0-1 of the ecs-init package. If your container instances are launched - /// from version 20190301 or later, then they contain the required versions of the container - /// agent and ecs-init. For more information, see Amazon ECS-optimized Linux AMI in the - /// Amazon Elastic Container Service Developer Guide. The valid values for Fargate are 2-120 seconds. + /// Time duration (in seconds) to wait before the container is forcefully killed if it + /// doesn't exit normally on its own. For tasks using the Fargate launch type, the task or service requires + /// the following platforms: Linux platform version 1.3.0 or later. Windows platform version 1.0.0 or later. For tasks that use the Fargate launch type, the max stop timeout value is 120 + /// seconds and if the parameter is not specified, the default value of 30 seconds is + /// used. For tasks that use the EC2 launch type, if the stopTimeout + /// parameter isn't specified, the value set for the Amazon ECS container agent configuration + /// variable ECS_CONTAINER_STOP_TIMEOUT is used. If neither the + /// stopTimeout parameter or the ECS_CONTAINER_STOP_TIMEOUT + /// agent configuration variable are set, then the default values of 30 seconds for Linux + /// containers and 30 seconds on Windows containers are used. Your container instances + /// require at least version 1.26.0 of the container agent to use a container stop timeout + /// value. However, we recommend using the latest container agent version. 
For information + /// about checking your agent version and updating to the latest version, see Updating the Amazon ECS Container Agent in the Amazon Elastic Container Service Developer Guide. If you're using + /// an Amazon ECS-optimized Linux AMI, your instance needs at least version 1.26.0-1 of the + /// ecs-init package. If your container instances are launched from version + /// 20190301 or later, then they contain the required versions of the + /// container agent and ecs-init. For more information, see Amazon ECS-optimized Linux AMI in the Amazon Elastic Container Service Developer Guide. The valid values for Fargate are 2-120 seconds. public let stopTimeout: Int? /// A list of namespaced kernel parameters to set in the container. This parameter maps to - /// Sysctls in the docker container create command and the --sysctl option to - /// docker run. For example, you can configure net.ipv4.tcp_keepalive_time setting to maintain - /// longer lived connections. + /// Sysctls in the docker container create command and the + /// --sysctl option to docker run. For example, you can configure + /// net.ipv4.tcp_keepalive_time setting to maintain longer lived + /// connections. public let systemControls: [SystemControl]? - /// A list of ulimits to set in the container. If a ulimit value is specified - /// in a task definition, it overrides the default values set by Docker. This parameter maps to - /// Ulimits in the docker container create command and the --ulimit option to - /// docker run. Valid naming values are displayed in the Ulimit data type. Amazon ECS tasks hosted on Fargate use the default + /// A list of ulimits to set in the container. If a ulimit value + /// is specified in a task definition, it overrides the default values set by Docker. This + /// parameter maps to Ulimits in the docker container create command and the + /// --ulimit option to docker run. Valid naming values are displayed in the + /// Ulimit data type. 
Amazon ECS tasks hosted on Fargate use the default /// resource limit values set by the operating system with the exception of /// the nofile resource limit parameter which Fargate /// overrides. The nofile resource limit sets a restriction on @@ -1321,24 +1379,27 @@ extension ECS { /// nofile soft limit is 65535 and the default hard limit /// is 65535. This parameter requires version 1.18 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: sudo docker version --format '{{.Server.APIVersion}}' This parameter is not supported for Windows containers. public let ulimits: [Ulimit]? - /// The user to use inside the container. This parameter maps to User in the docker - /// container create command and the --user option to docker run. When running tasks using the host network mode, don't run containers using the - /// root user (UID 0). We recommend using a non-root user for better security. You can specify the user using the following formats. If specifying a UID or GID, you - /// must specify it as a positive integer. user user:group uid uid:gid user:gid uid:group This parameter is not supported for Windows containers. + /// The user to use inside the container. This parameter maps to User in the + /// docker container create command and the --user option to docker run. When running tasks using the host network mode, don't run containers + /// using the root user (UID 0). We recommend using a non-root user for better + /// security. You can specify the user using the following formats. If specifying a UID + /// or GID, you must specify it as a positive integer. user user:group uid uid:gid user:gid uid:group This parameter is not supported for Windows containers. public let user: String? - /// Specifies whether Amazon ECS will resolve the container image tag - /// provided in the container definition to an image digest. 
By default, the - /// value is enabled. If you set the value for a container as - /// disabled, Amazon ECS will not resolve the provided container image tag - /// to a digest and will use the original image URI specified in the container definition for deployment. - /// For more information about container image resolution, see Container image resolution in the Amazon ECS Developer Guide. + /// Specifies whether Amazon ECS will resolve the container image tag provided in the container + /// definition to an image digest. By default, the value is enabled. If you set + /// the value for a container as disabled, Amazon ECS will not resolve the provided + /// container image tag to a digest and will use the original image URI specified in the + /// container definition for deployment. For more information about container image + /// resolution, see Container image resolution in the Amazon ECS Developer + /// Guide. public let versionConsistency: VersionConsistency? - /// Data volumes to mount from another container. This parameter maps to VolumesFrom in the - /// docker container create command and the --volumes-from option to docker run. + /// Data volumes to mount from another container. This parameter maps to + /// VolumesFrom in the docker container create command and the + /// --volumes-from option to docker run. public let volumesFrom: [VolumeFrom]? /// The working directory to run commands inside the container in. This parameter maps to - /// WorkingDir in the docker container create command and the --workdir - /// option to docker run. + /// WorkingDir in the docker container create command and the + /// --workdir option to docker run. public let workingDirectory: String? @inlinable @@ -1434,18 +1495,19 @@ extension ECS { } public struct ContainerDependency: AWSEncodableShape & AWSDecodableShape { - /// The dependency condition of the container. 
The following are the available conditions and their - /// behavior: START - This condition emulates the behavior of links and volumes today. - /// It validates that a dependent container is started before permitting other containers to - /// start. COMPLETE - This condition validates that a dependent container runs to - /// completion (exits) before permitting other containers to start. This can be useful for - /// nonessential containers that run a script and then exit. This condition can't be set on an - /// essential container. SUCCESS - This condition is the same as COMPLETE, but it also - /// requires that the container exits with a zero status. This condition can't be set - /// on an essential container. HEALTHY - This condition validates that the dependent container passes its - /// Docker health check before permitting other containers to start. This requires that the - /// dependent container has health checks configured. This condition is confirmed only at task - /// startup. + /// The dependency condition of the container. The following are the available conditions + /// and their behavior: START - This condition emulates the behavior of links and + /// volumes today. It validates that a dependent container is started before + /// permitting other containers to start. COMPLETE - This condition validates that a dependent + /// container runs to completion (exits) before permitting other containers to + /// start. This can be useful for nonessential containers that run a script and then + /// exit. This condition can't be set on an essential container. SUCCESS - This condition is the same as + /// COMPLETE, but it also requires that the container exits with a + /// zero status. This condition can't be set on an essential + /// container. HEALTHY - This condition validates that the dependent + /// container passes its Docker health check before permitting other containers to + /// start. 
This requires that the dependent container has health checks configured. + /// This condition is confirmed only at task startup. public let condition: ContainerCondition /// The name of a container. public let containerName: String @@ -1485,76 +1547,83 @@ extension ECS { } public struct ContainerInstance: AWSDecodableShape { - /// This parameter returns true if the agent is connected to Amazon ECS. An instance with an - /// agent that may be unhealthy or stopped return false. Only instances connected to an agent - /// can accept task placement requests. + /// This parameter returns true if the agent is connected to Amazon ECS. An + /// instance with an agent that may be unhealthy or stopped return false. Only + /// instances connected to an agent can accept task placement requests. public let agentConnected: Bool? - /// The status of the most recent agent update. If an update wasn't ever requested, this value is - /// NULL. + /// The status of the most recent agent update. If an update wasn't ever requested, this + /// value is NULL. public let agentUpdateStatus: AgentUpdateStatus? - /// The resources attached to a container instance, such as an elastic network interface. + /// The resources attached to a container instance, such as an elastic network + /// interface. public let attachments: [Attachment]? - /// The attributes set for the container instance, either by the Amazon ECS container agent at instance - /// registration or manually with the PutAttributes operation. + /// The attributes set for the container instance, either by the Amazon ECS container agent at + /// instance registration or manually with the PutAttributes + /// operation. public let attributes: [Attribute]? /// The capacity provider that's associated with the container instance. public let capacityProviderName: String? - /// The Amazon Resource Name (ARN) of the container instance. 
For more information about the ARN format, see Amazon Resource Name (ARN) - /// in the Amazon ECS Developer Guide. + /// The Amazon Resource Name (ARN) of the container instance. For more information about the ARN format, + /// see Amazon Resource Name (ARN) in the Amazon ECS Developer Guide. public let containerInstanceArn: String? - /// The ID of the container instance. For Amazon EC2 instances, this value is the Amazon EC2 instance ID. For - /// external instances, this value is the Amazon Web Services Systems Manager managed instance ID. + /// The ID of the container instance. For Amazon EC2 instances, this value is the Amazon EC2 + /// instance ID. For external instances, this value is the Amazon Web Services Systems Manager managed instance ID. public let ec2InstanceId: String? /// An object representing the health status of the container instance. public let healthStatus: ContainerInstanceHealthStatus? - /// The number of tasks on the container instance that are in the PENDING status. + /// The number of tasks on the container instance that are in the PENDING + /// status. public let pendingTasksCount: Int? /// The Unix timestamp for the time when the container instance was registered. public let registeredAt: Date? - /// For CPU and memory resource types, this parameter describes the amount of each resource that was - /// available on the container instance when the container agent registered it with Amazon ECS. This value - /// represents the total amount of CPU and memory that can be allocated on this container instance to - /// tasks. For port resource types, this parameter describes the ports that were reserved by the Amazon ECS - /// container agent when it registered the container instance with Amazon ECS. + /// For CPU and memory resource types, this parameter describes the amount of each + /// resource that was available on the container instance when the container agent + /// registered it with Amazon ECS. 
This value represents the total amount of CPU and memory that + /// can be allocated on this container instance to tasks. For port resource types, this + /// parameter describes the ports that were reserved by the Amazon ECS container agent when it + /// registered the container instance with Amazon ECS. public let registeredResources: [Resource]? - /// For CPU and memory resource types, this parameter describes the remaining CPU and memory that wasn't - /// already allocated to tasks and is therefore available for new tasks. For port resource types, this - /// parameter describes the ports that were reserved by the Amazon ECS container agent (at instance registration - /// time) and any task containers that have reserved port mappings on the host (with the host - /// or bridge network mode). Any port that's not specified here is available for new - /// tasks. + /// For CPU and memory resource types, this parameter describes the remaining CPU and + /// memory that wasn't already allocated to tasks and is therefore available for new tasks. + /// For port resource types, this parameter describes the ports that were reserved by the + /// Amazon ECS container agent (at instance registration time) and any task containers that have + /// reserved port mappings on the host (with the host or bridge + /// network mode). Any port that's not specified here is available for new tasks. public let remainingResources: [Resource]? - /// The number of tasks on the container instance that have a desired status (desiredStatus) - /// of RUNNING. + /// The number of tasks on the container instance that have a desired status + /// (desiredStatus) of RUNNING. public let runningTasksCount: Int? /// The status of the container instance. The valid values are REGISTERING, /// REGISTRATION_FAILED, ACTIVE, INACTIVE, - /// DEREGISTERING, or DRAINING. 
If your account has opted in to the awsvpcTrunking account setting, then any newly - /// registered container instance will transition to a REGISTERING status while the trunk - /// elastic network interface is provisioned for the instance. If the registration fails, the instance will - /// transition to a REGISTRATION_FAILED status. You can describe the container instance and - /// see the reason for failure in the statusReason parameter. Once the container instance is - /// terminated, the instance transitions to a DEREGISTERING status while the trunk elastic - /// network interface is deprovisioned. The instance then transitions to an INACTIVE - /// status. The ACTIVE status indicates that the container instance can accept tasks. The - /// DRAINING indicates that new tasks aren't placed on the container instance and any - /// service tasks running on the container instance are removed if possible. For more information, see - /// Container instance draining in the Amazon Elastic Container Service Developer Guide. + /// DEREGISTERING, or DRAINING. If your account has opted in to the awsvpcTrunking account setting, then + /// any newly registered container instance will transition to a REGISTERING + /// status while the trunk elastic network interface is provisioned for the instance. If the + /// registration fails, the instance will transition to a REGISTRATION_FAILED + /// status. You can describe the container instance and see the reason for failure in the + /// statusReason parameter. Once the container instance is terminated, the + /// instance transitions to a DEREGISTERING status while the trunk elastic + /// network interface is deprovisioned. The instance then transitions to an + /// INACTIVE status. The ACTIVE status indicates that the container instance can accept tasks. + /// The DRAINING indicates that new tasks aren't placed on the container + /// instance and any service tasks running on the container instance are removed if + /// possible. 
For more information, see Container instance draining in the + /// Amazon Elastic Container Service Developer Guide. public let status: String? /// The reason that the container instance reached its current status. public let statusReason: String? - /// The metadata that you apply to the container instance to help you categorize and organize them. Each - /// tag consists of a key and an optional value. You define both. The following basic restrictions apply to tags: Maximum number of tags per resource - 50 For each resource, each tag key must be unique, and each tag key can have only one value. Maximum key length - 128 Unicode characters in UTF-8 Maximum value length - 256 Unicode characters in UTF-8 If your tagging schema is used across multiple services and resources, remember that other services may have restrictions on allowed characters. Generally allowed characters are: letters, numbers, and spaces representable in UTF-8, and the following characters: + - = . _ : / @. Tag keys and values are case-sensitive. Do not use aws:, AWS:, or any upper or lowercase combination of such as a prefix for either keys or values as it is reserved for Amazon Web Services use. You cannot edit or delete tag keys or values with this prefix. Tags with this prefix do not count against your tags per resource limit. + /// The metadata that you apply to the container instance to help you categorize and + /// organize them. Each tag consists of a key and an optional value. You define both. The following basic restrictions apply to tags: Maximum number of tags per resource - 50 For each resource, each tag key must be unique, and each tag key can have only one value. Maximum key length - 128 Unicode characters in UTF-8 Maximum value length - 256 Unicode characters in UTF-8 If your tagging schema is used across multiple services and resources, remember that other services may have restrictions on allowed characters. 
Generally allowed characters are: letters, numbers, and spaces representable in UTF-8, and the following characters: + - = . _ : / @. Tag keys and values are case-sensitive. Do not use aws:, AWS:, or any upper or lowercase combination of such as a prefix for either keys or values as it is reserved for Amazon Web Services use. You cannot edit or delete tag keys or values with this prefix. Tags with this prefix do not count against your tags per resource limit. public let tags: [Tag]? - /// The version counter for the container instance. Every time a container instance experiences a change - /// that triggers a CloudWatch event, the version counter is incremented. If you're replicating your Amazon ECS - /// container instance state with CloudWatch Events, you can compare the version of a container instance - /// reported by the Amazon ECS APIs with the version reported in CloudWatch Events for the container instance - /// (inside the detail object) to verify that the version in your event stream is + /// The version counter for the container instance. Every time a container instance + /// experiences a change that triggers a CloudWatch event, the version counter is + /// incremented. If you're replicating your Amazon ECS container instance state with CloudWatch + /// Events, you can compare the version of a container instance reported by the Amazon ECS APIs + /// with the version reported in CloudWatch Events for the container instance (inside the + /// detail object) to verify that the version in your event stream is /// current. public let version: Int64? - /// The version information for the Amazon ECS container agent and Docker daemon running on the container - /// instance. + /// The version information for the Amazon ECS container agent and Docker daemon running on the + /// container instance. public let versionInfo: VersionInfo? 
@inlinable @@ -1602,10 +1671,11 @@ extension ECS { } public struct ContainerInstanceHealthStatus: AWSDecodableShape { - /// An array of objects representing the details of the container instance health status. + /// An array of objects representing the details of the container instance health + /// status. public let details: [InstanceHealthCheckResult]? - /// The overall health status of the container instance. This is an aggregate status of all container - /// instance health checks. + /// The overall health status of the container instance. This is an aggregate status of + /// all container instance health checks. public let overallStatus: InstanceHealthCheckState? @inlinable @@ -1621,31 +1691,32 @@ extension ECS { } public struct ContainerOverride: AWSEncodableShape & AWSDecodableShape { - /// The command to send to the container that overrides the default command from the Docker image or the - /// task definition. You must also specify a container name. + /// The command to send to the container that overrides the default command from the + /// Docker image or the task definition. You must also specify a container name. public let command: [String]? - /// The number of cpu units reserved for the container, instead of the default value from - /// the task definition. You must also specify a container name. + /// The number of cpu units reserved for the container, instead of the + /// default value from the task definition. You must also specify a container name. public let cpu: Int? - /// The environment variables to send to the container. You can add new environment variables, which are - /// added to the container at launch, or you can override the existing environment variables from the - /// Docker image or the task definition. You must also specify a container name. + /// The environment variables to send to the container. 
You can add new environment + /// variables, which are added to the container at launch, or you can override the existing + /// environment variables from the Docker image or the task definition. You must also + /// specify a container name. public let environment: [KeyValuePair]? - /// A list of files containing the environment variables to pass to a container, instead of the value - /// from the container definition. + /// A list of files containing the environment variables to pass to a container, instead + /// of the value from the container definition. public let environmentFiles: [EnvironmentFile]? - /// The hard limit (in MiB) of memory to present to the container, instead of the default value from the - /// task definition. If your container attempts to exceed the memory specified here, the container is - /// killed. You must also specify a container name. + /// The hard limit (in MiB) of memory to present to the container, instead of the default + /// value from the task definition. If your container attempts to exceed the memory + /// specified here, the container is killed. You must also specify a container name. public let memory: Int? - /// The soft limit (in MiB) of memory to reserve for the container, instead of the default value from the - /// task definition. You must also specify a container name. + /// The soft limit (in MiB) of memory to reserve for the container, instead of the default + /// value from the task definition. You must also specify a container name. public let memoryReservation: Int? - /// The name of the container that receives the override. This parameter is required if any override is - /// specified. + /// The name of the container that receives the override. This parameter is required if + /// any override is specified. public let name: String? - /// The type and amount of a resource to assign to a container, instead of the default value from the - /// task definition. The only supported resource is a GPU. 
+ /// The type and amount of a resource to assign to a container, instead of the default + /// value from the task definition. The only supported resource is a GPU. public let resourceRequirements: [ResourceRequirement]? @inlinable @@ -1675,14 +1746,17 @@ extension ECS { public struct ContainerRestartPolicy: AWSEncodableShape & AWSDecodableShape { /// Specifies whether a restart policy is enabled for the container. public let enabled: Bool - /// A list of exit codes that Amazon ECS will ignore and not attempt a restart on. You can specify a maximum - /// of 50 container exit codes. By default, Amazon ECS does not ignore any exit codes. + /// A list of exit codes that Amazon ECS will ignore and not attempt a restart on. You can + /// specify a maximum of 50 container exit codes. By default, Amazon ECS does not ignore any exit + /// codes. public let ignoredExitCodes: [Int]? - /// A period of time (in seconds) that the container must run for before a restart can be attempted. A - /// container can be restarted only once every restartAttemptPeriod seconds. If a container - /// isn't able to run for this time period and exits early, it will not be restarted. You can set a minimum - /// restartAttemptPeriod of 60 seconds and a maximum restartAttemptPeriod of - /// 1800 seconds. By default, a container must run for 300 seconds before it can be restarted. + /// A period of time (in seconds) that the container must run for before a restart can be + /// attempted. A container can be restarted only once every + /// restartAttemptPeriod seconds. If a container isn't able to run for this + /// time period and exits early, it will not be restarted. You can set a minimum + /// restartAttemptPeriod of 60 seconds and a maximum + /// restartAttemptPeriod of 1800 seconds. By default, a container must run + /// for 300 seconds before it can be restarted. public let restartAttemptPeriod: Int? 
@inlinable @@ -1702,7 +1776,8 @@ extension ECS { public struct ContainerStateChange: AWSEncodableShape { /// The name of the container. public let containerName: String? - /// The exit code for the container, if the state change is a result of the container exiting. + /// The exit code for the container, if the state change is a result of the container + /// exiting. public let exitCode: Int? /// The container image SHA 256 digest. public let imageDigest: String? @@ -1740,12 +1815,14 @@ extension ECS { public struct CreateCapacityProviderRequest: AWSEncodableShape { /// The details of the Auto Scaling group for the capacity provider. public let autoScalingGroupProvider: AutoScalingGroupProvider - /// The name of the capacity provider. Up to 255 characters are allowed. They include letters (both upper - /// and lowercase letters), numbers, underscores (_), and hyphens (-). The name can't be prefixed with - /// "aws", "ecs", or "fargate". + /// The name of the capacity provider. Up to 255 characters are allowed. They include + /// letters (both upper and lowercase letters), numbers, underscores (_), and hyphens (-). + /// The name can't be prefixed with "aws", "ecs", or + /// "fargate". public let name: String - /// The metadata that you apply to the capacity provider to categorize and organize them more - /// conveniently. Each tag consists of a key and an optional value. You define both of them. The following basic restrictions apply to tags: Maximum number of tags per resource - 50 For each resource, each tag key must be unique, and each tag key can have only one value. Maximum key length - 128 Unicode characters in UTF-8 Maximum value length - 256 Unicode characters in UTF-8 If your tagging schema is used across multiple services and resources, remember that other services may have restrictions on allowed characters. Generally allowed characters are: letters, numbers, and spaces representable in UTF-8, and the following characters: + - = . _ : / @. 
Tag keys and values are case-sensitive. Do not use aws:, AWS:, or any upper or lowercase combination of such as a prefix for either keys or values as it is reserved for Amazon Web Services use. You cannot edit or delete tag keys or values with this prefix. Tags with this prefix do not count against your tags per resource limit. + /// The metadata that you apply to the capacity provider to categorize and organize them + /// more conveniently. Each tag consists of a key and an optional value. You define both of + /// them. The following basic restrictions apply to tags: Maximum number of tags per resource - 50 For each resource, each tag key must be unique, and each tag key can have only one value. Maximum key length - 128 Unicode characters in UTF-8 Maximum value length - 256 Unicode characters in UTF-8 If your tagging schema is used across multiple services and resources, remember that other services may have restrictions on allowed characters. Generally allowed characters are: letters, numbers, and spaces representable in UTF-8, and the following characters: + - = . _ : / @. Tag keys and values are case-sensitive. Do not use aws:, AWS:, or any upper or lowercase combination of such as a prefix for either keys or values as it is reserved for Amazon Web Services use. You cannot edit or delete tag keys or values with this prefix. Tags with this prefix do not count against your tags per resource limit. public let tags: [Tag]? @inlinable @@ -1785,26 +1862,28 @@ extension ECS { } public struct CreateClusterRequest: AWSEncodableShape { - /// The short name of one or more capacity providers to associate with the cluster. A capacity provider - /// must be associated with a cluster before it can be included as part of the default capacity provider - /// strategy of the cluster or used in a capacity provider strategy when calling the CreateService or RunTask actions. 
If specifying a capacity provider that uses an Auto Scaling group, the capacity provider must be - /// created but not associated with another cluster. New Auto Scaling group capacity providers can be - /// created with the CreateCapacityProvider - /// API operation. To use a Fargate capacity provider, specify either the FARGATE or - /// FARGATE_SPOT capacity providers. The Fargate capacity providers are available to all - /// accounts and only need to be associated with a cluster to be used. The PutCapacityProvider API operation is used to update the list of available capacity - /// providers for a cluster after the cluster is created. + /// The short name of one or more capacity providers to associate with the cluster. A + /// capacity provider must be associated with a cluster before it can be included as part of + /// the default capacity provider strategy of the cluster or used in a capacity provider + /// strategy when calling the CreateService or + /// RunTask actions. If specifying a capacity provider that uses an Auto Scaling group, the capacity + /// provider must be created but not associated with another cluster. New Auto Scaling group + /// capacity providers can be created with the CreateCapacityProvider API operation. To use a Fargate capacity provider, specify either the FARGATE or + /// FARGATE_SPOT capacity providers. The Fargate capacity providers are + /// available to all accounts and only need to be associated with a cluster to be + /// used. The PutCapacityProvider API operation is used to update the list of available + /// capacity providers for a cluster after the cluster is created. public let capacityProviders: [String]? - /// The name of your cluster. If you don't specify a name for your cluster, you create a cluster that's - /// named default. Up to 255 letters (uppercase and lowercase), numbers, underscores, and hyphens are allowed. + /// The name of your cluster. 
If you don't specify a name for your cluster, you create a + /// cluster that's named default. Up to 255 letters (uppercase and lowercase), numbers, underscores, and hyphens are allowed. public let clusterName: String? /// The execute command configuration for the cluster. public let configuration: ClusterConfiguration? - /// The capacity provider strategy to set as the default for the cluster. After a default capacity - /// provider strategy is set for a cluster, when you call the CreateService or RunTask APIs with - /// no capacity provider strategy or launch type specified, the default capacity provider strategy for the - /// cluster is used. If a default capacity provider strategy isn't defined for a cluster when it was created, it can be - /// defined later with the PutClusterCapacityProviders API operation. + /// The capacity provider strategy to set as the default for the cluster. After a default + /// capacity provider strategy is set for a cluster, when you call the CreateService or RunTask APIs with no + /// capacity provider strategy or launch type specified, the default capacity provider + /// strategy for the cluster is used. If a default capacity provider strategy isn't defined for a cluster when it was + /// created, it can be defined later with the PutClusterCapacityProviders API operation. public let defaultCapacityProviderStrategy: [CapacityProviderStrategyItem]? /// Use this parameter to set a default Service Connect namespace. After you set a default /// Service Connect namespace, any new services with Service Connect turned on that are created in the cluster are added as @@ -1818,12 +1897,12 @@ extension ECS { /// Only the tasks that Amazon ECS services create are supported with Service Connect. /// For more information, see Service Connect in the Amazon Elastic Container Service Developer Guide. public let serviceConnectDefaults: ClusterServiceConnectDefaultsRequest? - /// The setting to use when creating a cluster. 
This parameter is used to turn on CloudWatch Container Insights - /// for a cluster. If this value is specified, it overrides the containerInsights value set - /// with PutAccountSetting or PutAccountSettingDefault. + /// The setting to use when creating a cluster. This parameter is used to turn on CloudWatch + /// Container Insights for a cluster. If this value is specified, it overrides the + /// containerInsights value set with PutAccountSetting or PutAccountSettingDefault. public let settings: [ClusterSetting]? - /// The metadata that you apply to the cluster to help you categorize and organize them. Each tag - /// consists of a key and an optional value. You define both. The following basic restrictions apply to tags: Maximum number of tags per resource - 50 For each resource, each tag key must be unique, and each tag key can have only one value. Maximum key length - 128 Unicode characters in UTF-8 Maximum value length - 256 Unicode characters in UTF-8 If your tagging schema is used across multiple services and resources, remember that other services may have restrictions on allowed characters. Generally allowed characters are: letters, numbers, and spaces representable in UTF-8, and the following characters: + - = . _ : / @. Tag keys and values are case-sensitive. Do not use aws:, AWS:, or any upper or lowercase combination of such as a prefix for either keys or values as it is reserved for Amazon Web Services use. You cannot edit or delete tag keys or values with this prefix. Tags with this prefix do not count against your tags per resource limit. + /// The metadata that you apply to the cluster to help you categorize and organize them. + /// Each tag consists of a key and an optional value. You define both. The following basic restrictions apply to tags: Maximum number of tags per resource - 50 For each resource, each tag key must be unique, and each tag key can have only one value. 
Maximum key length - 128 Unicode characters in UTF-8 Maximum value length - 256 Unicode characters in UTF-8 If your tagging schema is used across multiple services and resources, remember that other services may have restrictions on allowed characters. Generally allowed characters are: letters, numbers, and spaces representable in UTF-8, and the following characters: + - = . _ : / @. Tag keys and values are case-sensitive. Do not use aws:, AWS:, or any upper or lowercase combination of such as a prefix for either keys or values as it is reserved for Amazon Web Services use. You cannot edit or delete tag keys or values with this prefix. Tags with this prefix do not count against your tags per resource limit. public let tags: [Tag]? @inlinable @@ -1876,9 +1955,10 @@ extension ECS { /// Indicates whether to use Availability Zone rebalancing for the service. For more information, see Balancing an Amazon ECS service across Availability Zones in /// the Amazon Elastic Container Service Developer Guide. public let availabilityZoneRebalancing: AvailabilityZoneRebalancing? - /// The capacity provider strategy to use for the service. If a capacityProviderStrategy is specified, the launchType parameter must - /// be omitted. If no capacityProviderStrategy or launchType is specified, the - /// defaultCapacityProviderStrategy for the cluster is used. A capacity provider strategy may contain a maximum of 6 capacity providers. + /// The capacity provider strategy to use for the service. If a capacityProviderStrategy is specified, the launchType + /// parameter must be omitted. If no capacityProviderStrategy or + /// launchType is specified, the + /// defaultCapacityProviderStrategy for the cluster is used. A capacity provider strategy can contain a maximum of 20 capacity providers. public let capacityProviderStrategy: [CapacityProviderStrategyItem]? /// An identifier that you provide to ensure the idempotency of the request. It must be /// unique and is case sensitive. 
Up to 36 ASCII characters in the range of 33-126 (inclusive) are allowed. @@ -1886,114 +1966,132 @@ extension ECS { /// The short name or full Amazon Resource Name (ARN) of the cluster that you run your service on. /// If you do not specify a cluster, the default cluster is assumed. public let cluster: String? - /// Optional deployment parameters that control how many tasks run during the deployment and the ordering - /// of stopping and starting tasks. + /// Optional deployment parameters that control how many tasks run during the deployment + /// and the ordering of stopping and starting tasks. public let deploymentConfiguration: DeploymentConfiguration? - /// The deployment controller to use for the service. If no deployment controller is specified, the - /// default value of ECS is used. + /// The deployment controller to use for the service. If no deployment controller is + /// specified, the default value of ECS is used. public let deploymentController: DeploymentController? - /// The number of instantiations of the specified task definition to place and keep running in your - /// service. This is required if schedulingStrategy is REPLICA or isn't specified. If - /// schedulingStrategy is DAEMON then this isn't required. + /// The number of instantiations of the specified task definition to place and keep + /// running in your service. This is required if schedulingStrategy is REPLICA or isn't + /// specified. If schedulingStrategy is DAEMON then this isn't + /// required. public let desiredCount: Int? - /// Specifies whether to turn on Amazon ECS managed tags for the tasks within the service. For more - /// information, see Tagging your Amazon ECS resources in the Amazon Elastic Container Service Developer Guide. When you use Amazon ECS managed tags, you need to set the propagateTags request - /// parameter. + /// Specifies whether to turn on Amazon ECS managed tags for the tasks within the service. 
For + /// more information, see Tagging your Amazon ECS + /// resources in the Amazon Elastic Container Service Developer Guide. When you use Amazon ECS managed tags, you need to set the propagateTags + /// request parameter. public let enableECSManagedTags: Bool? /// Determines whether the execute command functionality is turned on for the service. If - /// true, this enables execute command functionality on all containers in the service - /// tasks. + /// true, this enables execute command functionality on all containers in + /// the service tasks. public let enableExecuteCommand: Bool? - /// The period of time, in seconds, that the Amazon ECS service scheduler ignores unhealthy Elastic Load Balancing, VPC Lattice, and container - /// health checks after a task has first started. If you don't specify a health check grace - /// period value, the default value of 0 is used. If you don't use any of the health checks, - /// then healthCheckGracePeriodSeconds is unused. If your service's tasks take a while to start and respond to health checks, you can specify a - /// health check grace period of up to 2,147,483,647 seconds (about 69 years). During that time, the Amazon ECS - /// service scheduler ignores health check status. This grace period can prevent the service scheduler from - /// marking tasks as unhealthy and stopping them before they have time to come up. + /// The period of time, in seconds, that the Amazon ECS service scheduler ignores unhealthy + /// Elastic Load Balancing, VPC Lattice, and container health checks after a task has first started. If you don't + /// specify a health check grace period value, the default value of 0 is used. + /// If you don't use any of the health checks, then + /// healthCheckGracePeriodSeconds is unused. If your service's tasks take a while to start and respond to health checks, you can + /// specify a health check grace period of up to 2,147,483,647 seconds (about 69 years). 
+ /// During that time, the Amazon ECS service scheduler ignores health check status. This grace + /// period can prevent the service scheduler from marking tasks as unhealthy and stopping + /// them before they have time to come up. public let healthCheckGracePeriodSeconds: Int? - /// The infrastructure that you run your service on. For more information, see Amazon ECS launch - /// types in the Amazon Elastic Container Service Developer Guide. The FARGATE launch type runs your tasks on Fargate On-Demand infrastructure. Fargate Spot infrastructure is available for use but a capacity provider strategy must be used. - /// For more information, see Fargate capacity providers in the Amazon ECS Developer Guide. The EC2 launch type runs your tasks on Amazon EC2 instances registered to your - /// cluster. The EXTERNAL launch type runs your tasks on your on-premises server or virtual machine - /// (VM) capacity registered to your cluster. A service can use either a launch type or a capacity provider strategy. If a launchType - /// is specified, the capacityProviderStrategy parameter must be omitted. + /// The infrastructure that you run your service on. For more information, see Amazon ECS + /// launch types in the Amazon Elastic Container Service Developer Guide. The FARGATE launch type runs your tasks on Fargate On-Demand + /// infrastructure. Fargate Spot infrastructure is available for use but a capacity provider + /// strategy must be used. For more information, see Fargate capacity providers in the Amazon ECS + /// Developer Guide. The EC2 launch type runs your tasks on Amazon EC2 instances registered to your + /// cluster. The EXTERNAL launch type runs your tasks on your on-premises server or + /// virtual machine (VM) capacity registered to your cluster. A service can use either a launch type or a capacity provider strategy. If a + /// launchType is specified, the capacityProviderStrategy + /// parameter must be omitted. public let launchType: LaunchType? 
- /// A load balancer object representing the load balancers to use with your service. For more - /// information, see Service load balancing in the Amazon Elastic Container Service Developer Guide. If the service uses the rolling update (ECS) deployment controller and using either an - /// Application Load Balancer or Network Load Balancer, you must specify one or more target group ARNs to attach to the service. The - /// service-linked role is required for services that use multiple target groups. For more information, see - /// Using service-linked roles for Amazon ECS in the Amazon Elastic Container Service Developer Guide. If the service uses the CODE_DEPLOY deployment controller, the service is required to - /// use either an Application Load Balancer or Network Load Balancer. When creating an CodeDeploy deployment group, you specify two target groups - /// (referred to as a targetGroupPair). During a deployment, CodeDeploy determines which task set - /// in your service has the status PRIMARY, and it associates one target group with it. Then, - /// it also associates the other target group with the replacement task set. The load balancer can also - /// have up to two listeners: a required listener for production traffic and an optional listener that you - /// can use to perform validation tests with Lambda functions before routing production traffic to - /// it. If you use the CODE_DEPLOY deployment controller, these values can be changed when - /// updating the service. For Application Load Balancers and Network Load Balancers, this object must contain the load balancer target group ARN, the container - /// name, and the container port to access from the load balancer. The container name must be as it appears - /// in a container definition. The load balancer name parameter must be omitted. 
When a task from this - /// service is placed on a container instance, the container instance and port combination is registered as - /// a target in the target group that's specified here. For Classic Load Balancers, this object must contain the load balancer name, the container name , and the container - /// port to access from the load balancer. The container name must be as it appears in a container - /// definition. The target group ARN parameter must be omitted. When a task from this service is placed - /// on a container instance, the container instance is registered with the load balancer that's specified - /// here. Services with tasks that use the awsvpc network mode (for example, those with the - /// Fargate launch type) only support Application Load Balancers and Network Load Balancers. Classic Load Balancers aren't supported. Also, when - /// you create any target groups for these services, you must choose ip as the target type, - /// not instance. This is because tasks that use the awsvpc network mode are - /// associated with an elastic network interface, not an Amazon EC2 instance. + /// A load balancer object representing the load balancers to use with your service. For + /// more information, see Service load balancing in the Amazon Elastic Container Service Developer Guide. If the service uses the rolling update (ECS) deployment controller and + /// using either an Application Load Balancer or Network Load Balancer, you must specify one or more target group ARNs to attach + /// to the service. The service-linked role is required for services that use multiple + /// target groups. For more information, see Using service-linked roles for Amazon ECS in the + /// Amazon Elastic Container Service Developer Guide. If the service uses the CODE_DEPLOY deployment controller, the service is + /// required to use either an Application Load Balancer or Network Load Balancer. 
When creating an CodeDeploy deployment group, you + /// specify two target groups (referred to as a targetGroupPair). During a + /// deployment, CodeDeploy determines which task set in your service has the status + /// PRIMARY, and it associates one target group with it. Then, it also + /// associates the other target group with the replacement task set. The load balancer can + /// also have up to two listeners: a required listener for production traffic and an + /// optional listener that you can use to perform validation tests with Lambda functions + /// before routing production traffic to it. If you use the CODE_DEPLOY deployment controller, these values can be + /// changed when updating the service. For Application Load Balancers and Network Load Balancers, this object must contain the load balancer target group ARN, + /// the container name, and the container port to access from the load balancer. The + /// container name must be as it appears in a container definition. The load balancer name + /// parameter must be omitted. When a task from this service is placed on a container + /// instance, the container instance and port combination is registered as a target in the + /// target group that's specified here. For Classic Load Balancers, this object must contain the load balancer name, the container name , and + /// the container port to access from the load balancer. The container name must be as it + /// appears in a container definition. The target group ARN parameter must be omitted. + /// When a task from this service is placed on a container instance, the container instance + /// is registered with the load balancer that's specified here. Services with tasks that use the awsvpc network mode (for example, those + /// with the Fargate launch type) only support Application Load Balancers and Network Load Balancers. Classic Load Balancers + /// aren't supported. 
Also, when you create any target groups for these services, you must + /// choose ip as the target type, not instance. This is because + /// tasks that use the awsvpc network mode are associated with an elastic + /// network interface, not an Amazon EC2 instance. public let loadBalancers: [LoadBalancer]? - /// The network configuration for the service. This parameter is required for task definitions that use - /// the awsvpc network mode to receive their own elastic network interface, and it isn't - /// supported for other network modes. For more information, see Task networking in the - /// Amazon Elastic Container Service Developer Guide. + /// The network configuration for the service. This parameter is required for task + /// definitions that use the awsvpc network mode to receive their own elastic + /// network interface, and it isn't supported for other network modes. For more information, + /// see Task networking + /// in the Amazon Elastic Container Service Developer Guide. public let networkConfiguration: NetworkConfiguration? - /// An array of placement constraint objects to use for tasks in your service. You can specify a maximum - /// of 10 constraints for each task. This limit includes constraints in the task definition and those - /// specified at runtime. + /// An array of placement constraint objects to use for tasks in your service. You can + /// specify a maximum of 10 constraints for each task. This limit includes constraints in + /// the task definition and those specified at runtime. public let placementConstraints: [PlacementConstraint]? - /// The placement strategy objects to use for tasks in your service. You can specify a maximum of 5 - /// strategy rules for each service. + /// The placement strategy objects to use for tasks in your service. You can specify a + /// maximum of 5 strategy rules for each service. public let placementStrategy: [PlacementStrategy]? - /// The platform version that your tasks in the service are running on. 
A platform version is specified - /// only for tasks using the Fargate launch type. If one isn't specified, the - /// LATEST platform version is used. For more information, see Fargate platform versions in - /// the Amazon Elastic Container Service Developer Guide. + /// The platform version that your tasks in the service are running on. A platform version + /// is specified only for tasks using the Fargate launch type. If one isn't + /// specified, the LATEST platform version is used. For more information, see + /// Fargate platform + /// versions in the Amazon Elastic Container Service Developer Guide. public let platformVersion: String? - /// Specifies whether to propagate the tags from the task definition to the task. If no value is - /// specified, the tags aren't propagated. Tags can only be propagated to the task during task creation. To - /// add tags to a task after task creation, use the TagResource API action. You must set this to a value other than NONE when you use Cost Explorer. For more - /// information, see Amazon ECS usage reports in the Amazon Elastic Container Service Developer Guide. The default is NONE. + /// Specifies whether to propagate the tags from the task definition to the task. If no + /// value is specified, the tags aren't propagated. Tags can only be propagated to the task + /// during task creation. To add tags to a task after task creation, use the TagResource API action. You must set this to a value other than NONE when you use Cost Explorer. + /// For more information, see Amazon ECS usage reports + /// in the Amazon Elastic Container Service Developer Guide. The default is NONE. public let propagateTags: PropagateTags? - /// The name or full Amazon Resource Name (ARN) of the IAM role that allows Amazon ECS to make calls to your load balancer on - /// your behalf. This parameter is only permitted if you are using a load balancer with your service and - /// your task definition doesn't use the awsvpc network mode. 
If you specify the - /// role parameter, you must also specify a load balancer object with the - /// loadBalancers parameter. If your account has already created the Amazon ECS service-linked role, that role is used for your - /// service unless you specify a role here. The service-linked role is required if your task definition - /// uses the awsvpc network mode or if the service is configured to use service discovery, - /// an external deployment controller, multiple target groups, or Elastic Inference accelerators in - /// which case you don't specify a role here. For more information, see Using service-linked - /// roles for Amazon ECS in the Amazon Elastic Container Service Developer Guide. If your specified role has a path other than /, then you must either specify the full - /// role ARN (this is recommended) or prefix the role name with the path. For example, if a role with the - /// name bar has a path of /foo/ then you would specify /foo/bar as - /// the role name. For more information, see Friendly names and - /// paths in the IAM User Guide. + /// The name or full Amazon Resource Name (ARN) of the IAM role that allows Amazon ECS to make calls to your + /// load balancer on your behalf. This parameter is only permitted if you are using a load + /// balancer with your service and your task definition doesn't use the awsvpc + /// network mode. If you specify the role parameter, you must also specify a + /// load balancer object with the loadBalancers parameter. If your account has already created the Amazon ECS service-linked role, that role is + /// used for your service unless you specify a role here. The service-linked role is + /// required if your task definition uses the awsvpc network mode or if the + /// service is configured to use service discovery, an external deployment controller, + /// multiple target groups, or Elastic Inference accelerators in which case you don't + /// specify a role here. 
For more information, see Using + /// service-linked roles for Amazon ECS in the Amazon Elastic Container Service Developer Guide. If your specified role has a path other than /, then you must either + /// specify the full role ARN (this is recommended) or prefix the role name with the path. + /// For example, if a role with the name bar has a path of /foo/ + /// then you would specify /foo/bar as the role name. For more information, see + /// Friendly names and paths in the IAM User + /// Guide. public let role: String? - /// The scheduling strategy to use for the service. For more information, see Services. There are two service scheduler strategies available: REPLICA-The replica scheduling strategy places and maintains the desired - /// number of tasks across your cluster. By default, the service scheduler spreads tasks across - /// Availability Zones. You can use task placement strategies and constraints to customize task - /// placement decisions. This scheduler strategy is required if the service uses the - /// CODE_DEPLOY or EXTERNAL deployment controller types. DAEMON-The daemon scheduling strategy deploys exactly one task on each - /// active container instance that meets all of the task placement constraints that you specify in - /// your cluster. The service scheduler also evaluates the task placement constraints for running - /// tasks and will stop tasks that don't meet the placement constraints. When you're using this - /// strategy, you don't need to specify a desired number of tasks, a task placement strategy, or - /// use Service Auto Scaling policies. Tasks using the Fargate launch type or the CODE_DEPLOY or - /// EXTERNAL deployment controller types don't support the DAEMON - /// scheduling strategy. + /// The scheduling strategy to use for the service. For more information, see Services. 
There are two service scheduler strategies available: REPLICA-The replica scheduling strategy places and + /// maintains the desired number of tasks across your cluster. By default, the + /// service scheduler spreads tasks across Availability Zones. You can use task + /// placement strategies and constraints to customize task placement decisions. This + /// scheduler strategy is required if the service uses the CODE_DEPLOY + /// or EXTERNAL deployment controller types. DAEMON-The daemon scheduling strategy deploys exactly one + /// task on each active container instance that meets all of the task placement + /// constraints that you specify in your cluster. The service scheduler also + /// evaluates the task placement constraints for running tasks and will stop tasks + /// that don't meet the placement constraints. When you're using this strategy, you + /// don't need to specify a desired number of tasks, a task placement strategy, or + /// use Service Auto Scaling policies. Tasks using the Fargate launch type or the + /// CODE_DEPLOY or EXTERNAL deployment controller + /// types don't support the DAEMON scheduling strategy. public let schedulingStrategy: SchedulingStrategy? /// The configuration for this service to discover and connect to /// services, and be discovered by, and connected from, other services within a namespace. Tasks that run in a namespace can use short names to connect @@ -2003,26 +2101,28 @@ extension ECS { /// Only the tasks that Amazon ECS services create are supported with Service Connect. /// For more information, see Service Connect in the Amazon Elastic Container Service Developer Guide. public let serviceConnectConfiguration: ServiceConnectConfiguration? - /// The name of your service. Up to 255 letters (uppercase and lowercase), numbers, underscores, and hyphens are allowed. 
Service names must be unique within a cluster, but - /// you can have similarly named services in multiple clusters within a Region or across multiple - /// Regions. + /// The name of your service. Up to 255 letters (uppercase and lowercase), numbers, underscores, and hyphens are allowed. Service names must be unique within + /// a cluster, but you can have similarly named services in multiple clusters within a + /// Region or across multiple Regions. public let serviceName: String - /// The details of the service discovery registry to associate with this service. For more information, - /// see Service - /// discovery. Each service may be associated with one service registry. Multiple service registries for each - /// service isn't supported. + /// The details of the service discovery registry to associate with this service. For more + /// information, see Service + /// discovery. Each service may be associated with one service registry. Multiple service + /// registries for each service isn't supported. public let serviceRegistries: [ServiceRegistry]? - /// The metadata that you apply to the service to help you categorize and organize them. Each tag - /// consists of a key and an optional value, both of which you define. When a service is deleted, the tags - /// are deleted as well. The following basic restrictions apply to tags: Maximum number of tags per resource - 50 For each resource, each tag key must be unique, and each tag key can have only one value. Maximum key length - 128 Unicode characters in UTF-8 Maximum value length - 256 Unicode characters in UTF-8 If your tagging schema is used across multiple services and resources, remember that other services may have restrictions on allowed characters. Generally allowed characters are: letters, numbers, and spaces representable in UTF-8, and the following characters: + - = . _ : / @. Tag keys and values are case-sensitive. 
Do not use aws:, AWS:, or any upper or lowercase combination of such as a prefix for either keys or values as it is reserved for Amazon Web Services use. You cannot edit or delete tag keys or values with this prefix. Tags with this prefix do not count against your tags per resource limit. + /// The metadata that you apply to the service to help you categorize and organize them. + /// Each tag consists of a key and an optional value, both of which you define. When a + /// service is deleted, the tags are deleted as well. The following basic restrictions apply to tags: Maximum number of tags per resource - 50 For each resource, each tag key must be unique, and each tag key can have only one value. Maximum key length - 128 Unicode characters in UTF-8 Maximum value length - 256 Unicode characters in UTF-8 If your tagging schema is used across multiple services and resources, remember that other services may have restrictions on allowed characters. Generally allowed characters are: letters, numbers, and spaces representable in UTF-8, and the following characters: + - = . _ : / @. Tag keys and values are case-sensitive. Do not use aws:, AWS:, or any upper or lowercase combination of such as a prefix for either keys or values as it is reserved for Amazon Web Services use. You cannot edit or delete tag keys or values with this prefix. Tags with this prefix do not count against your tags per resource limit. public let tags: [Tag]? - /// The family and revision (family:revision) or full ARN of the - /// task definition to run in your service. If a revision isn't specified, the latest - /// ACTIVE revision is used. A task definition must be specified if the service uses either the ECS or - /// CODE_DEPLOY deployment controllers. For more information about deployment types, see Amazon ECS deployment types. + /// The family and revision (family:revision) or + /// full ARN of the task definition to run in your service. 
If a revision + /// isn't specified, the latest ACTIVE revision is used. A task definition must be specified if the service uses either the ECS or + /// CODE_DEPLOY deployment controllers. For more information about deployment types, see Amazon ECS deployment + /// types. public let taskDefinition: String? - /// The configuration for a volume specified in the task definition as a volume that is configured at - /// launch time. Currently, the only supported volume type is an Amazon EBS volume. + /// The configuration for a volume specified in the task definition as a volume that is + /// configured at launch time. Currently, the only supported volume type is an Amazon EBS + /// volume. public let volumeConfigurations: [ServiceVolumeConfiguration]? /// The VPC Lattice configuration for the service being created. public let vpcLatticeConfigurations: [VpcLatticeConfiguration]? @@ -2102,11 +2202,14 @@ extension ECS { } public struct CreateServiceResponse: AWSDecodableShape { - /// The full description of your service following the create call. A service will return either a capacityProviderStrategy or launchType - /// parameter, but not both, depending where one was specified when it was created. If a service is using the ECS deployment controller, the - /// deploymentController and taskSets parameters will not be returned. if the service uses the CODE_DEPLOY deployment controller, the - /// deploymentController, taskSets and deployments parameters - /// will be returned, however the deployments parameter will be an empty list. + /// The full description of your service following the create call. A service will return either a capacityProviderStrategy or + /// launchType parameter, but not both, depending where one was specified + /// when it was created. If a service is using the ECS deployment controller, the + /// deploymentController and taskSets parameters will not be + /// returned. 
if the service uses the CODE_DEPLOY deployment controller, the + /// deploymentController, taskSets and + /// deployments parameters will be returned, however the + /// deployments parameter will be an empty list. public let service: Service? @inlinable @@ -2121,55 +2224,59 @@ extension ECS { public struct CreateTaskSetRequest: AWSEncodableShape { /// The capacity provider strategy to use for the task set. A capacity provider strategy consists of one or more capacity providers along with the - /// base and weight to assign to them. A capacity provider must be associated - /// with the cluster to be used in a capacity provider strategy. The PutClusterCapacityProviders API is used to associate a capacity provider with a cluster. - /// Only capacity providers with an ACTIVE or UPDATING status can be used. If a capacityProviderStrategy is specified, the launchType parameter must - /// be omitted. If no capacityProviderStrategy or launchType is specified, the - /// defaultCapacityProviderStrategy for the cluster is used. If specifying a capacity provider that uses an Auto Scaling group, the capacity provider must already - /// be created. New capacity providers can be created with the CreateCapacityProviderProviderAPI operation. To use a Fargate capacity provider, specify either the FARGATE or - /// FARGATE_SPOT capacity providers. The Fargate capacity providers are available to all - /// accounts and only need to be associated with a cluster to be used. The PutClusterCapacityProviders API operation is used to update the list of available capacity - /// providers for a cluster after the cluster is created. + /// base and weight to assign to them. A capacity provider + /// must be associated with the cluster to be used in a capacity provider strategy. The + /// PutClusterCapacityProviders API is used to associate a capacity provider + /// with a cluster. Only capacity providers with an ACTIVE or + /// UPDATING status can be used. 
If a capacityProviderStrategy is specified, the launchType + /// parameter must be omitted. If no capacityProviderStrategy or + /// launchType is specified, the + /// defaultCapacityProviderStrategy for the cluster is used. If specifying a capacity provider that uses an Auto Scaling group, the capacity + /// provider must already be created. New capacity providers can be created with the CreateCapacityProvider API operation. To use a Fargate capacity provider, specify either the FARGATE or + /// FARGATE_SPOT capacity providers. The Fargate capacity providers are + /// available to all accounts and only need to be associated with a cluster to be + /// used. The PutClusterCapacityProviders API operation is used to update the list of + /// available capacity providers for a cluster after the cluster is created. public let capacityProviderStrategy: [CapacityProviderStrategyItem]? /// An identifier that you provide to ensure the idempotency of the request. It must be /// unique and is case sensitive. Up to 36 ASCII characters in the range of 33-126 (inclusive) are allowed. public let clientToken: String? - /// The short name or full Amazon Resource Name (ARN) of the cluster that hosts the service to create the task set - /// in. + /// The short name or full Amazon Resource Name (ARN) of the cluster that hosts the service to create the + /// task set in. public let cluster: String - /// An optional non-unique tag that identifies this task set in external systems. If the task set is - /// associated with a service discovery registry, the tasks in this task set will have the - /// ECS_TASK_SET_EXTERNAL_ID Cloud Map attribute set to the provided + /// An optional non-unique tag that identifies this task set in external systems. If the + /// task set is associated with a service discovery registry, the tasks in this task set + /// will have the ECS_TASK_SET_EXTERNAL_ID Cloud Map attribute set to the provided /// value. public let externalId: String?
- /// The launch type that new tasks in the task set uses. For more information, see Amazon ECS launch - /// types in the Amazon Elastic Container Service Developer Guide. If a launchType is specified, the capacityProviderStrategy parameter must - /// be omitted. + /// The launch type that new tasks in the task set uses. For more information, see Amazon ECS + /// launch types in the Amazon Elastic Container Service Developer Guide. If a launchType is specified, the capacityProviderStrategy + /// parameter must be omitted. public let launchType: LaunchType? - /// A load balancer object representing the load balancer to use with the task set. The supported load - /// balancer types are either an Application Load Balancer or a Network Load Balancer. + /// A load balancer object representing the load balancer to use with the task set. The + /// supported load balancer types are either an Application Load Balancer or a Network Load Balancer. public let loadBalancers: [LoadBalancer]? /// An object representing the network configuration for a task set. public let networkConfiguration: NetworkConfiguration? - /// The platform version that the tasks in the task set uses. A platform version is specified only for - /// tasks using the Fargate launch type. If one isn't specified, the LATEST - /// platform version is used. + /// The platform version that the tasks in the task set uses. A platform version is + /// specified only for tasks using the Fargate launch type. If one isn't + /// specified, the LATEST platform version is used. public let platformVersion: String? - /// A floating-point percentage of the desired number of tasks to place and keep running in the task - /// set. + /// A floating-point percentage of the desired number of tasks to place and keep running + /// in the task set. public let scale: Scale? /// The short name or full Amazon Resource Name (ARN) of the service to create the task set in. 
public let service: String - /// The details of the service discovery registries to assign to this task set. For more information, see - /// Service + /// The details of the service discovery registries to assign to this task set. For more + /// information, see Service /// discovery. public let serviceRegistries: [ServiceRegistry]? - /// The metadata that you apply to the task set to help you categorize and organize them. Each tag - /// consists of a key and an optional value. You define both. When a service is deleted, the tags are - /// deleted. The following basic restrictions apply to tags: Maximum number of tags per resource - 50 For each resource, each tag key must be unique, and each tag key can have only one value. Maximum key length - 128 Unicode characters in UTF-8 Maximum value length - 256 Unicode characters in UTF-8 If your tagging schema is used across multiple services and resources, remember that other services may have restrictions on allowed characters. Generally allowed characters are: letters, numbers, and spaces representable in UTF-8, and the following characters: + - = . _ : / @. Tag keys and values are case-sensitive. Do not use aws:, AWS:, or any upper or lowercase combination of such as a prefix for either keys or values as it is reserved for Amazon Web Services use. You cannot edit or delete tag keys or values with this prefix. Tags with this prefix do not count against your tags per resource limit. + /// The metadata that you apply to the task set to help you categorize and organize them. + /// Each tag consists of a key and an optional value. You define both. When a service is + /// deleted, the tags are deleted. The following basic restrictions apply to tags: Maximum number of tags per resource - 50 For each resource, each tag key must be unique, and each tag key can have only one value. 
Maximum key length - 128 Unicode characters in UTF-8 Maximum value length - 256 Unicode characters in UTF-8 If your tagging schema is used across multiple services and resources, remember that other services may have restrictions on allowed characters. Generally allowed characters are: letters, numbers, and spaces representable in UTF-8, and the following characters: + - = . _ : / @. Tag keys and values are case-sensitive. Do not use aws:, AWS:, or any upper or lowercase combination of such as a prefix for either keys or values as it is reserved for Amazon Web Services use. You cannot edit or delete tag keys or values with this prefix. Tags with this prefix do not count against your tags per resource limit. public let tags: [Tag]? - /// The task definition for the tasks in the task set to use. If a revision isn't specified, the latest - /// ACTIVE revision is used. + /// The task definition for the tasks in the task set to use. If a revision isn't + /// specified, the latest ACTIVE revision is used. public let taskDefinition: String @inlinable @@ -2217,9 +2324,10 @@ extension ECS { } public struct CreateTaskSetResponse: AWSDecodableShape { - /// Information about a set of Amazon ECS tasks in either an CodeDeploy or an EXTERNAL deployment. - /// A task set includes details such as the desired number of tasks, how many tasks are running, and - /// whether the task set serves production traffic. + /// Information about a set of Amazon ECS tasks in either an CodeDeploy or an + /// EXTERNAL deployment. A task set includes details such as the desired + /// number of tasks, how many tasks are running, and whether the task set serves production + /// traffic. public let taskSet: TaskSet? @inlinable @@ -2233,11 +2341,11 @@ extension ECS { } public struct CreatedAt: AWSEncodableShape { - /// Include service deployments in the result that were created after this time. The format is yyyy-MM-dd - /// HH:mm:ss.SSSSSS. 
+ /// Include service deployments in the result that were created after this time. The + /// format is yyyy-MM-dd HH:mm:ss.SSSSSS. public let after: Date? - /// Include service deployments in the result that were created before this time. The format is yyyy-MM-dd - /// HH:mm:ss.SSSSSS. + /// Include service deployments in the result that were created before this time. The + /// format is yyyy-MM-dd HH:mm:ss.SSSSSS. public let before: Date? @inlinable @@ -2253,17 +2361,18 @@ extension ECS { } public struct DeleteAccountSettingRequest: AWSEncodableShape { - /// The resource name to disable the account setting for. If serviceLongArnFormat is - /// specified, the ARN for your Amazon ECS services is affected. If taskLongArnFormat is - /// specified, the ARN and resource ID for your Amazon ECS tasks is affected. If - /// containerInstanceLongArnFormat is specified, the ARN and resource ID for your Amazon ECS - /// container instances is affected. If awsvpcTrunking is specified, the ENI limit for your - /// Amazon ECS container instances is affected. + /// The resource name to disable the account setting for. If + /// serviceLongArnFormat is specified, the ARN for your Amazon ECS services is + /// affected. If taskLongArnFormat is specified, the ARN and resource ID for + /// your Amazon ECS tasks is affected. If containerInstanceLongArnFormat is + /// specified, the ARN and resource ID for your Amazon ECS container instances is affected. If + /// awsvpcTrunking is specified, the ENI limit for your Amazon ECS container + /// instances is affected. public let name: SettingName - /// The Amazon Resource Name (ARN) of the principal. It can be an user, role, or the root user. If you - /// specify the root user, it disables the account setting for all users, roles, and the root user of the account - /// unless a user or role explicitly overrides these settings. If this field is omitted, the setting is - /// changed only for the authenticated user. 
+ /// The Amazon Resource Name (ARN) of the principal. It can be a user, role, or the + /// root user. If you specify the root user, it disables the account setting for all users, roles, + /// and the root user of the account unless a user or role explicitly overrides these settings. + /// If this field is omitted, the setting is changed only for the authenticated user. public let principalArn: String? @inlinable @@ -2293,12 +2402,13 @@ extension ECS { } public struct DeleteAttributesRequest: AWSEncodableShape { - /// The attributes to delete from your resource. You can specify up to 10 attributes for each request. - /// For custom attributes, specify the attribute name and target ID, but don't specify the value. If you - /// specify the target ID using the short form, you must also specify the target type. + /// The attributes to delete from your resource. You can specify up to 10 attributes for + /// each request. For custom attributes, specify the attribute name and target ID, but don't + /// specify the value. If you specify the target ID using the short form, you must also + /// specify the target type. public let attributes: [Attribute] - /// The short name or full Amazon Resource Name (ARN) of the cluster that contains the resource to delete attributes. - /// If you do not specify a cluster, the default cluster is assumed. + /// The short name or full Amazon Resource Name (ARN) of the cluster that contains the resource to delete + /// attributes. If you do not specify a cluster, the default cluster is assumed. public let cluster: String? @inlinable @@ -2387,9 +2497,9 @@ extension ECS { /// The short name or full Amazon Resource Name (ARN) of the cluster that hosts the service to delete. /// If you do not specify a cluster, the default cluster is assumed. public let cluster: String? - /// If true, allows you to delete a service even if it wasn't scaled down to zero tasks.
- /// It's only necessary to use this if the service uses the REPLICA scheduling - /// strategy. + /// If true, allows you to delete a service even if it wasn't scaled down to + /// zero tasks. It's only necessary to use this if the service uses the REPLICA + /// scheduling strategy. public let force: Bool? /// The name of the service to delete. public let service: String @@ -2423,8 +2533,9 @@ extension ECS { } public struct DeleteTaskDefinitionsRequest: AWSEncodableShape { - /// The family and revision (family:revision) or full Amazon Resource Name (ARN) of - /// the task definition to delete. You must specify a revision. You can specify up to 10 task definitions as a comma separated list. + /// The family and revision (family:revision) or + /// full Amazon Resource Name (ARN) of the task definition to delete. You must specify a + /// revision. You can specify up to 10 task definitions as a comma separated list. public let taskDefinitions: [String] @inlinable @@ -2456,12 +2567,14 @@ extension ECS { } public struct DeleteTaskSetRequest: AWSEncodableShape { - /// The short name or full Amazon Resource Name (ARN) of the cluster that hosts the service that the task set found in to - /// delete. + /// The short name or full Amazon Resource Name (ARN) of the cluster that hosts the service that the task + /// set found in to delete. public let cluster: String - /// If true, you can delete a task set even if it hasn't been scaled down to zero. + /// If true, you can delete a task set even if it hasn't been scaled down to + /// zero. public let force: Bool? - /// The short name or full Amazon Resource Name (ARN) of the service that hosts the task set to delete. + /// The short name or full Amazon Resource Name (ARN) of the service that hosts the task set to + /// delete. public let service: String /// The task set ID or full Amazon Resource Name (ARN) of the task set to delete. 
public let taskSet: String @@ -2504,44 +2617,50 @@ extension ECS { /// The most recent desired count of tasks that was specified for the service to deploy or /// maintain. public let desiredCount: Int? - /// The number of consecutively failed tasks in the deployment. A task is considered a failure if the - /// service scheduler can't launch the task, the task doesn't transition to a RUNNING state, - /// or if it fails any of its defined health checks and is stopped. Once a service deployment has one or more successfully running tasks, the failed task count - /// resets to zero and stops being evaluated. + /// The number of consecutively failed tasks in the deployment. A task is considered a + /// failure if the service scheduler can't launch the task, the task doesn't transition to a + /// RUNNING state, or if it fails any of its defined health checks and is + /// stopped. Once a service deployment has one or more successfully running tasks, the failed + /// task count resets to zero and stops being evaluated. public let failedTasks: Int? /// The Fargate ephemeral storage settings for the deployment. public let fargateEphemeralStorage: DeploymentEphemeralStorage? /// The ID of the deployment. public let id: String? - /// The launch type the tasks in the service are using. For more information, see Amazon ECS Launch - /// Types in the Amazon Elastic Container Service Developer Guide. + /// The launch type the tasks in the service are using. For more information, see Amazon ECS + /// Launch Types in the Amazon Elastic Container Service Developer Guide. public let launchType: LaunchType? /// The VPC subnet and security group configuration for tasks that receive their own elastic network interface by using the awsvpc networking mode. public let networkConfiguration: NetworkConfiguration? - /// The number of tasks in the deployment that are in the PENDING status. + /// The number of tasks in the deployment that are in the PENDING + /// status. 
public let pendingCount: Int? - /// The operating system that your tasks in the service, or tasks are running on. A platform family is - /// specified only for tasks using the Fargate launch type. All tasks that run as part of this service must use the same platformFamily value as - /// the service, for example, LINUX.. + /// The operating system that your tasks in the service, or tasks are running on. A + /// platform family is specified only for tasks using the Fargate launch type. All tasks that run as part of this service must use the same + /// platformFamily value as the service, for example, + /// LINUX.. public let platformFamily: String? - /// The platform version that your tasks in the service run on. A platform version is only specified for - /// tasks using the Fargate launch type. If one isn't specified, the LATEST - /// platform version is used. For more information, see Fargate Platform Versions in - /// the Amazon Elastic Container Service Developer Guide. + /// The platform version that your tasks in the service run on. A platform version is only + /// specified for tasks using the Fargate launch type. If one isn't specified, + /// the LATEST platform version is used. For more information, see Fargate Platform Versions in the Amazon Elastic Container Service Developer Guide. public let platformVersion: String? - /// The rolloutState of a service is only returned for services that use the rolling - /// update (ECS) deployment type that aren't behind a Classic Load Balancer. The rollout state of the deployment. When a service deployment is started, it begins in an - /// IN_PROGRESS state. When the service reaches a steady state, the deployment transitions - /// to a COMPLETED state. If the service fails to reach a steady state and circuit breaker is - /// turned on, the deployment transitions to a FAILED state. A deployment in - /// FAILED state doesn't launch any new tasks. For more information, see DeploymentCircuitBreaker. 
+ /// The rolloutState of a service is only returned for services that use + /// the rolling update (ECS) deployment type that aren't behind a + /// Classic Load Balancer. The rollout state of the deployment. When a service deployment is started, it begins + /// in an IN_PROGRESS state. When the service reaches a steady state, the + /// deployment transitions to a COMPLETED state. If the service fails to reach + /// a steady state and circuit breaker is turned on, the deployment transitions to a + /// FAILED state. A deployment in FAILED state doesn't launch + /// any new tasks. For more information, see DeploymentCircuitBreaker. public let rolloutState: DeploymentRolloutState? /// A description of the rollout state of a deployment. public let rolloutStateReason: String? - /// The number of tasks in the deployment that are in the RUNNING status. + /// The number of tasks in the deployment that are in the RUNNING + /// status. public let runningCount: Int? - /// The details of the Service Connect configuration that's used by this deployment. Compare the - /// configuration between multiple deployments when troubleshooting issues with new deployments. The configuration for this service to discover and connect to + /// The details of the Service Connect configuration that's used by this deployment. + /// Compare the configuration between multiple deployments when troubleshooting issues with + /// new deployments. The configuration for this service to discover and connect to /// services, and be discovered by, and connected from, other services within a namespace. Tasks that run in a namespace can use short names to connect /// to services in the namespace. Tasks can connect to services across all of the clusters in the namespace. /// Tasks connect through a managed proxy container @@ -2549,19 +2668,20 @@ extension ECS { /// Only the tasks that Amazon ECS services create are supported with Service Connect. 
/// For more information, see Service Connect in the Amazon Elastic Container Service Developer Guide. public let serviceConnectConfiguration: ServiceConnectConfiguration? - /// The list of Service Connect resources that are associated with this deployment. Each list entry maps - /// a discovery name to a Cloud Map service name. + /// The list of Service Connect resources that are associated with this deployment. Each + /// list entry maps a discovery name to a Cloud Map service name. public let serviceConnectResources: [ServiceConnectServiceResource]? - /// The status of the deployment. The following describes each state. PRIMARY The most recent deployment of a service. ACTIVE A service deployment that still has running tasks, but are in the process of being - /// replaced with a new PRIMARY deployment. INACTIVE A deployment that has been completely replaced. + /// The status of the deployment. The following describes each state. PRIMARY The most recent deployment of a service. ACTIVE A service deployment that still has running tasks, but are in the process + /// of being replaced with a new PRIMARY deployment. INACTIVE A deployment that has been completely replaced. public let status: String? - /// The most recent task definition that was specified for the tasks in the service to use. + /// The most recent task definition that was specified for the tasks in the service to + /// use. public let taskDefinition: String? /// The Unix timestamp for the time when the service deployment was last updated. public let updatedAt: Date? - /// The details of the volume that was configuredAtLaunch. You can configure different - /// settings like the size, throughput, volumeType, and ecryption in ServiceManagedEBSVolumeConfiguration. The name of the volume must match the - /// name from the task definition. + /// The details of the volume that was configuredAtLaunch. 
You can configure + /// different settings like the size, throughput, volumeType, and encryption in ServiceManagedEBSVolumeConfiguration. The name of the volume + /// must match the name from the task definition. public let volumeConfigurations: [ServiceVolumeConfiguration]? /// The VPC Lattice configuration for the service deployment. public let vpcLatticeConfigurations: [VpcLatticeConfiguration]? @@ -2619,11 +2739,12 @@ extension ECS { public struct DeploymentAlarms: AWSEncodableShape & AWSDecodableShape { /// One or more CloudWatch alarm names. Use a "," to separate the alarms. public let alarmNames: [String] - /// Determines whether to use the CloudWatch alarm option in the service deployment process. + /// Determines whether to use the CloudWatch alarm option in the service deployment + /// process. public let enable: Bool - /// Determines whether to configure Amazon ECS to roll back the service if a service deployment fails. If - /// rollback is used, when a service deployment fails, the service is rolled back to the last deployment - /// that completed successfully. + /// Determines whether to configure Amazon ECS to roll back the service if a service deployment + /// fails. If rollback is used, when a service deployment fails, the service is rolled back + /// to the last deployment that completed successfully. public let rollback: Bool @inlinable @@ -2643,9 +2764,9 @@ extension ECS { public struct DeploymentCircuitBreaker: AWSEncodableShape & AWSDecodableShape { /// Determines whether to use the deployment circuit breaker logic for the service. public let enable: Bool - /// Determines whether to configure Amazon ECS to roll back the service if a service deployment
If rollback is on, when a service deployment fails, the service is rolled back to + /// the last deployment that completed successfully. public let rollback: Bool @inlinable @@ -2663,61 +2784,86 @@ extension ECS { public struct DeploymentConfiguration: AWSEncodableShape & AWSDecodableShape { /// Information about the CloudWatch alarms. public let alarms: DeploymentAlarms? - /// The deployment circuit breaker can only be used for services using the rolling update - /// (ECS) deployment type. The deployment circuit breaker determines whether a service - /// deployment will fail if the service can't reach a steady state. If you use the deployment circuit - /// breaker, a service deployment will transition to a failed state and stop launching new tasks. If you - /// use the rollback option, when a service deployment fails, the service is rolled back to the last - /// deployment that completed successfully. For more information, see Rolling update in the - /// Amazon Elastic Container Service Developer Guide + /// The deployment circuit breaker can only be used for services using the rolling + /// update (ECS) deployment type. The deployment circuit breaker determines whether a + /// service deployment will fail if the service can't reach a steady state. If you use the + /// deployment circuit breaker, a service deployment will transition to a failed state and + /// stop launching new tasks. If you use the rollback option, when a service deployment + /// fails, the service is rolled back to the last deployment that completed successfully. + /// For more information, see Rolling + /// update in the Amazon Elastic Container Service Developer + /// Guide public let deploymentCircuitBreaker: DeploymentCircuitBreaker? 
/// If a service is using the rolling update (ECS) deployment type, the - /// maximumPercent parameter represents an upper limit on the number of your service's - /// tasks that are allowed in the RUNNING or PENDING state during a deployment, - /// as a percentage of the desiredCount (rounded down to the nearest integer). This parameter - /// enables you to define the deployment batch size. For example, if your service is using the - /// REPLICA service scheduler and has a desiredCount of four tasks and a - /// maximumPercent value of 200%, the scheduler may start four new tasks before stopping - /// the four older tasks (provided that the cluster resources required to do this are available). The - /// default maximumPercent value for a service using the REPLICA service - /// scheduler is 200%. If a service is using either the blue/green (CODE_DEPLOY) or EXTERNAL - /// deployment types, and tasks in the service use the EC2 launch type, the maximum percent value is set to the default value. The maximum percent value is used to define the upper limit on the number of the tasks in - /// the service that remain in the RUNNING state while the container instances are in the - /// DRAINING state. You can't specify a custom maximumPercent value for a service that uses either the - /// blue/green (CODE_DEPLOY) or EXTERNAL deployment types and has tasks that - /// use the EC2 launch type. If the tasks in the service use the Fargate launch type, the maximum percent value is - /// not used, although it is returned when describing your service. + /// maximumPercent parameter represents an upper limit on the number of + /// your service's tasks that are allowed in the RUNNING or + /// PENDING state during a deployment, as a percentage of the + /// desiredCount (rounded down to the nearest integer). This parameter + /// enables you to define the deployment batch size. 
For example, if your service is using + /// the REPLICA service scheduler and has a desiredCount of four + /// tasks and a maximumPercent value of 200%, the scheduler may start four new + /// tasks before stopping the four older tasks (provided that the cluster resources required + /// to do this are available). The default maximumPercent value for a service + /// using the REPLICA service scheduler is 200%. The Amazon ECS scheduler uses this parameter to replace unhealthy tasks by starting + /// replacement tasks first and then stopping the unhealthy tasks, as long as cluster + /// resources for starting replacement tasks are available. For more information about how + /// the scheduler replaces unhealthy tasks, see Amazon ECS + /// services. If a service is using either the blue/green (CODE_DEPLOY) or + /// EXTERNAL deployment types, and tasks in the service use the + /// EC2 launch type, the maximum percent + /// value is set to the default value. The maximum percent + /// value is used to define the upper limit on the number of the tasks in the service that + /// remain in the RUNNING state while the container instances are in the + /// DRAINING state. You can't specify a custom maximumPercent value for a service that + /// uses either the blue/green (CODE_DEPLOY) or EXTERNAL + /// deployment types and has tasks that use the EC2 launch type. If the service uses either the blue/green (CODE_DEPLOY) or EXTERNAL + /// deployment types, and the tasks in the service use the Fargate launch type, the maximum + /// percent value is not used. The value is still returned when describing your service. public let maximumPercent: Int? /// If a service is using the rolling update (ECS) deployment type, the - /// minimumHealthyPercent represents a lower limit on the number of your service's tasks - /// that must remain in the RUNNING state during a deployment, as a percentage of the - /// desiredCount (rounded up to the nearest integer). 
This parameter enables you to deploy - /// without using additional cluster capacity. For example, if your service has a desiredCount - /// of four tasks and a minimumHealthyPercent of 50%, the service scheduler may stop two - /// existing tasks to free up cluster capacity before starting two new tasks. For services that do not use a load balancer, the following should be - /// noted: A service is considered healthy if all essential containers within the tasks in the service - /// pass their health checks. If a task has no essential containers with a health check defined, the service scheduler will - /// wait for 40 seconds after a task reaches a RUNNING state before the task is - /// counted towards the minimum healthy percent total. If a task has one or more essential containers with a health check defined, the service - /// scheduler will wait for the task to reach a healthy status before counting it towards the - /// minimum healthy percent total. A task is considered healthy when all essential containers - /// within the task have passed their health checks. The amount of time the service scheduler can - /// wait for is determined by the container health check settings. For services that do use a load balancer, the following should be noted: If a task has no essential containers with a health check defined, the service scheduler will - /// wait for the load balancer target group health check to return a healthy status before counting - /// the task towards the minimum healthy percent total. If a task has an essential container with a health check defined, the service scheduler will - /// wait for both the task to reach a healthy status and the load balancer target group health - /// check to return a healthy status before counting the task towards the minimum healthy percent - /// total. The default value for a replica service for minimumHealthyPercent is 100%. 
The default - /// minimumHealthyPercent value for a service using the DAEMON service - /// schedule is 0% for the CLI, the Amazon Web Services SDKs, and the APIs and 50% for the Amazon Web Services Management Console. The minimum number of healthy tasks during a deployment is the desiredCount multiplied - /// by the minimumHealthyPercent/100, rounded up to the nearest integer value. If a service is using either the blue/green (CODE_DEPLOY) or EXTERNAL - /// deployment types and is running tasks that use the EC2 launch type, the minimum healthy percent value is set to the default value. The minimum healthy percent value is used to define the lower limit on the - /// number of the tasks in the service that remain in the RUNNING state while the container - /// instances are in the DRAINING state. You can't specify a custom minimumHealthyPercent value for a service that uses - /// either the blue/green (CODE_DEPLOY) or EXTERNAL deployment types and has - /// tasks that use the EC2 launch type. If a service is using either the blue/green (CODE_DEPLOY) or EXTERNAL - /// deployment types and is running tasks that use the Fargate launch type, the minimum - /// healthy percent value is not used, although it is returned when describing your service. + /// minimumHealthyPercent represents a lower limit on the number of your + /// service's tasks that must remain in the RUNNING state during a deployment, + /// as a percentage of the desiredCount (rounded up to the nearest integer). + /// This parameter enables you to deploy without using additional cluster capacity. For + /// example, if your service has a desiredCount of four tasks and a + /// minimumHealthyPercent of 50%, the service scheduler may stop two + /// existing tasks to free up cluster capacity before starting two new tasks. 
If any tasks are unhealthy and if maximumPercent doesn't allow the Amazon ECS + /// scheduler to start replacement tasks, the scheduler stops the unhealthy tasks one-by-one + /// — using the minimumHealthyPercent as a constraint — to clear up capacity to + /// launch replacement tasks. For more information about how the scheduler replaces + /// unhealthy tasks, see Amazon ECS services . For services that do not use a load balancer, the following + /// should be noted: A service is considered healthy if all essential containers within the tasks + /// in the service pass their health checks. If a task has no essential containers with a health check defined, the service + /// scheduler will wait for 40 seconds after a task reaches a RUNNING + /// state before the task is counted towards the minimum healthy percent + /// total. If a task has one or more essential containers with a health check defined, + /// the service scheduler will wait for the task to reach a healthy status before + /// counting it towards the minimum healthy percent total. A task is considered + /// healthy when all essential containers within the task have passed their health + /// checks. The amount of time the service scheduler can wait for is determined by + /// the container health check settings. For services that do use a load balancer, the following should be + /// noted: If a task has no essential containers with a health check defined, the service + /// scheduler will wait for the load balancer target group health check to return a + /// healthy status before counting the task towards the minimum healthy percent + /// total. If a task has an essential container with a health check defined, the service + /// scheduler will wait for both the task to reach a healthy status and the load + /// balancer target group health check to return a healthy status before counting + /// the task towards the minimum healthy percent total. 
The default value for a replica service for minimumHealthyPercent is + /// 100%. The default minimumHealthyPercent value for a service using the + /// DAEMON service schedule is 0% for the CLI, the Amazon Web Services SDKs, and the + /// APIs and 50% for the Amazon Web Services Management Console. The minimum number of healthy tasks during a deployment is the + /// desiredCount multiplied by the minimumHealthyPercent/100, + /// rounded up to the nearest integer value. If a service is using either the blue/green (CODE_DEPLOY) or + /// EXTERNAL deployment types and is running tasks that use the + /// EC2 launch type, the minimum healthy + /// percent value is set to the default value. The minimum healthy percent value is used to define the lower limit on the + /// number of the tasks in the service that remain in the RUNNING state while + /// the container instances are in the DRAINING state. You can't specify a custom minimumHealthyPercent value for a service + /// that uses either the blue/green (CODE_DEPLOY) or EXTERNAL + /// deployment types and has tasks that use the EC2 launch type. If a service is using either the blue/green (CODE_DEPLOY) or + /// EXTERNAL deployment types and is running tasks that use the + /// Fargate launch type, the minimum healthy percent value is not used, + /// although it is returned when describing your service. public let minimumHealthyPercent: Int? @inlinable @@ -2737,15 +2883,18 @@ extension ECS { } public struct DeploymentController: AWSEncodableShape & AWSDecodableShape { - /// The deployment controller type to use. There are three deployment controller types available: ECS The rolling update (ECS) deployment type involves replacing the current - /// running version of the container with the latest version. 
The number of containers Amazon ECS - /// adds or removes from the service during a rolling update is controlled by adjusting the - /// minimum and maximum number of healthy tasks allowed during a service deployment, as - /// specified in the DeploymentConfiguration. For more information about rolling deployments, see Deploy Amazon ECS services by replacing tasks in the Amazon Elastic Container Service Developer Guide. CODE_DEPLOY The blue/green (CODE_DEPLOY) deployment type uses the blue/green deployment - /// model powered by CodeDeploy, which allows you to verify a new deployment of a service before - /// sending production traffic to it. For more information about blue/green deployments, see Validate the state of an Amazon ECS service before deployment in the Amazon Elastic Container Service Developer Guide. EXTERNAL The external (EXTERNAL) deployment type enables you to use any third-party - /// deployment controller for full control over the deployment process for an Amazon ECS - /// service. For more information about external deployments, see Deploy Amazon ECS services using a third-party controller in the Amazon Elastic Container Service Developer Guide. + /// The deployment controller type to use. There are three deployment controller types available: ECS The rolling update (ECS) deployment type involves replacing + /// the current running version of the container with the latest version. The + /// number of containers Amazon ECS adds or removes from the service during a rolling + /// update is controlled by adjusting the minimum and maximum number of healthy + /// tasks allowed during a service deployment, as specified in the DeploymentConfiguration. For more information about rolling deployments, see Deploy + /// Amazon ECS services by replacing tasks in the Amazon Elastic Container Service Developer Guide. 
CODE_DEPLOY The blue/green (CODE_DEPLOY) deployment type uses the + /// blue/green deployment model powered by CodeDeploy, which allows you to verify a + /// new deployment of a service before sending production traffic to it. For more information about blue/green deployments, see Validate the state of an Amazon ECS service before deployment in + /// the Amazon Elastic Container Service Developer Guide. EXTERNAL The external (EXTERNAL) deployment type enables you to use + /// any third-party deployment controller for full control over the deployment + /// process for an Amazon ECS service. For more information about external deployments, see Deploy Amazon ECS services using a third-party controller in the + /// Amazon Elastic Container Service Developer Guide. public let type: DeploymentControllerType @inlinable @@ -2759,7 +2908,8 @@ extension ECS { } public struct DeploymentEphemeralStorage: AWSDecodableShape { - /// Specify an Key Management Service key ID to encrypt the ephemeral storage for deployment. + /// Specify an Key Management Service key ID to encrypt the ephemeral storage for + /// deployment. public let kmsKeyId: String? @inlinable @@ -2773,21 +2923,21 @@ extension ECS { } public struct DeregisterContainerInstanceRequest: AWSEncodableShape { - /// The short name or full Amazon Resource Name (ARN) of the cluster that hosts the container instance to deregister. - /// If you do not specify a cluster, the default cluster is assumed. + /// The short name or full Amazon Resource Name (ARN) of the cluster that hosts the container instance to + /// deregister. If you do not specify a cluster, the default cluster is assumed. public let cluster: String? - /// The container instance ID or full ARN of the container instance to deregister. For more information - /// about the ARN format, see Amazon Resource Name (ARN) - /// in the Amazon ECS Developer Guide. + /// The container instance ID or full ARN of the container instance to deregister. 
For + /// more information about the ARN format, see Amazon Resource Name (ARN) in the Amazon ECS Developer Guide. public let containerInstance: String - /// Forces the container instance to be deregistered. If you have tasks running on the container instance - /// when you deregister it with the force option, these tasks remain running until you - /// terminate the instance or the tasks stop through some other means, but they're orphaned (no longer - /// monitored or accounted for by Amazon ECS). If an orphaned task on your container instance is part of an - /// Amazon ECS service, then the service scheduler starts another copy of that task, on a different container - /// instance if possible. Any containers in orphaned service tasks that are registered with a Classic Load Balancer or an Application Load Balancer target group - /// are deregistered. They begin connection draining according to the settings on the load balancer or - /// target group. + /// Forces the container instance to be deregistered. If you have tasks running on the + /// container instance when you deregister it with the force option, these + /// tasks remain running until you terminate the instance or the tasks stop through some + /// other means, but they're orphaned (no longer monitored or accounted for by Amazon ECS). If an + /// orphaned task on your container instance is part of an Amazon ECS service, then the service + /// scheduler starts another copy of that task, on a different container instance if + /// possible. Any containers in orphaned service tasks that are registered with a Classic Load Balancer or an Application Load Balancer + /// target group are deregistered. They begin connection draining according to the settings + /// on the load balancer or target group. public let force: Bool? 
@inlinable @@ -2819,8 +2969,9 @@ extension ECS { } public struct DeregisterTaskDefinitionRequest: AWSEncodableShape { - /// The family and revision (family:revision) or full Amazon Resource Name (ARN) of - /// the task definition to deregister. You must specify a revision. + /// The family and revision (family:revision) or + /// full Amazon Resource Name (ARN) of the task definition to deregister. You must specify a + /// revision. public let taskDefinition: String @inlinable @@ -2848,27 +2999,29 @@ extension ECS { } public struct DescribeCapacityProvidersRequest: AWSEncodableShape { - /// The short name or full Amazon Resource Name (ARN) of one or more capacity providers. Up to 100 capacity - /// providers can be described in an action. + /// The short name or full Amazon Resource Name (ARN) of one or more capacity providers. Up to + /// 100 capacity providers can be described in an action. public let capacityProviders: [String]? - /// Specifies whether or not you want to see the resource tags for the capacity provider. If - /// TAGS is specified, the tags are included in the response. If this field is omitted, - /// tags aren't included in the response. + /// Specifies whether or not you want to see the resource tags for the capacity provider. + /// If TAGS is specified, the tags are included in the response. If this field + /// is omitted, tags aren't included in the response. public let include: [CapacityProviderField]? - /// The maximum number of account setting results returned by DescribeCapacityProviders in - /// paginated output. When this parameter is used, DescribeCapacityProviders only returns - /// maxResults results in a single page along with a nextToken response - /// element. The remaining results of the initial request can be seen by sending another - /// DescribeCapacityProviders request with the returned nextToken value. This - /// value can be between 1 and 10. 
If - /// this parameter is not used, then DescribeCapacityProviders returns up to - /// 10 results and a nextToken value if - /// applicable. + /// The maximum number of account setting results returned by + /// DescribeCapacityProviders in paginated output. When this parameter is + /// used, DescribeCapacityProviders only returns maxResults + /// results in a single page along with a nextToken response element. The + /// remaining results of the initial request can be seen by sending another + /// DescribeCapacityProviders request with the returned + /// nextToken value. This value can be between + /// 1 and 10. If this + /// parameter is not used, then DescribeCapacityProviders returns up to + /// 10 results and a nextToken value + /// if applicable. public let maxResults: Int? /// The nextToken value returned from a previous paginated - /// DescribeCapacityProviders request where maxResults was used and the - /// results exceeded the value of that parameter. Pagination continues from the end of the previous results - /// that returned the nextToken value. This token should be treated as an opaque identifier that is only used to retrieve the next items in a list and not for other programmatic purposes. + /// DescribeCapacityProviders request where maxResults was + /// used and the results exceeded the value of that parameter. Pagination continues from the + /// end of the previous results that returned the nextToken value. This token should be treated as an opaque identifier that is only used to retrieve the next items in a list and not for other programmatic purposes. public let nextToken: String? @inlinable @@ -2892,10 +3045,11 @@ extension ECS { public let capacityProviders: [CapacityProvider]? /// Any failures associated with the call. public let failures: [Failure]? - /// The nextToken value to include in a future DescribeCapacityProviders - /// request. 
When the results of a DescribeCapacityProviders request exceed - /// maxResults, this value can be used to retrieve the next page of results. This value is - /// null when there are no more results to return. + /// The nextToken value to include in a future + /// DescribeCapacityProviders request. When the results of a + /// DescribeCapacityProviders request exceed maxResults, this + /// value can be used to retrieve the next page of results. This value is null + /// when there are no more results to return. public let nextToken: String? @inlinable @@ -2913,12 +3067,16 @@ extension ECS { } public struct DescribeClustersRequest: AWSEncodableShape { - /// A list of up to 100 cluster names or full cluster Amazon Resource Name (ARN) entries. If you do not specify a cluster, the default cluster is assumed. + /// A list of up to 100 cluster names or full cluster Amazon Resource Name (ARN) entries. + /// If you do not specify a cluster, the default cluster is assumed. public let clusters: [String]? - /// Determines whether to include additional information about the clusters in the response. If this - /// field is omitted, this information isn't included. If ATTACHMENTS is specified, the attachments for the container instances or tasks within - /// the cluster are included, for example the capacity providers. If SETTINGS is specified, the settings for the cluster are included. If CONFIGURATIONS is specified, the configuration for the cluster is included. If STATISTICS is specified, the task and service count is included, separated by launch - /// type. If TAGS is specified, the metadata tags associated with the cluster are included. + /// Determines whether to include additional information about the clusters in the + /// response. If this field is omitted, this information isn't included. If ATTACHMENTS is specified, the attachments for the container instances + /// or tasks within the cluster are included, for example the capacity providers. 
If SETTINGS is specified, the settings for the cluster are + /// included. If CONFIGURATIONS is specified, the configuration for the cluster is + /// included. If STATISTICS is specified, the task and service count is included, + /// separated by launch type. If TAGS is specified, the metadata tags associated with the cluster are + /// included. public let include: [ClusterField]? @inlinable @@ -2952,16 +3110,18 @@ extension ECS { } public struct DescribeContainerInstancesRequest: AWSEncodableShape { - /// The short name or full Amazon Resource Name (ARN) of the cluster that hosts the container instances to describe. - /// If you do not specify a cluster, the default cluster is assumed. This parameter is required if the container instance or container instances - /// you are describing were launched in any cluster other than the default cluster. + /// The short name or full Amazon Resource Name (ARN) of the cluster that hosts the container instances to + /// describe. If you do not specify a cluster, the default cluster is assumed. This parameter is required if the container instance + /// or container instances you are describing were launched in any cluster other than the + /// default cluster. public let cluster: String? /// A list of up to 100 container instance IDs or full Amazon Resource Name (ARN) entries. public let containerInstances: [String] - /// Specifies whether you want to see the resource tags for the container instance. If TAGS - /// is specified, the tags are included in the response. If CONTAINER_INSTANCE_HEALTH is - /// specified, the container instance health is included in the response. If this field is omitted, tags - /// and container instance health status aren't included in the response. + /// Specifies whether you want to see the resource tags for the container instance. If + /// TAGS is specified, the tags are included in the response. 
If + /// CONTAINER_INSTANCE_HEALTH is specified, the container instance health + /// is included in the response. If this field is omitted, tags and container instance + /// health status aren't included in the response. public let include: [ContainerInstanceField]? @inlinable @@ -3011,9 +3171,9 @@ extension ECS { } public struct DescribeServiceDeploymentsResponse: AWSDecodableShape { - /// Any failures associated with the call. If you decsribe a deployment with a service revision created before October 25, 2024, the - /// call fails. The failure includes the service revision ARN and the reason set to - /// MISSING. + /// Any failures associated with the call. If you describe a deployment with a service revision created before October 25, 2024, + /// the call fails. The failure includes the service revision ARN and the reason set to + /// MISSING. public let failures: [Failure]? /// The list of service deployments described. public let serviceDeployments: [ServiceDeployment]? @@ -3031,8 +3191,7 @@ extension ECS { } public struct DescribeServiceRevisionsRequest: AWSEncodableShape { - /// The ARN of the service revision. You can specify a maximum of 20 ARNs. You can call ListServiceDeployments to - /// get the ARNs. + /// The ARN of the service revision. You can specify a maximum of 20 ARNs. You can call ListServiceDeployments to get the ARNs. public let serviceRevisionArns: [String] @inlinable @@ -3065,15 +3224,15 @@ extension ECS { public struct DescribeServicesRequest: AWSEncodableShape { /// The short name or full Amazon Resource Name (ARN)the cluster that hosts the service to describe. - /// If you do not specify a cluster, the default cluster is assumed. This parameter is required if the service or services you are describing were - /// launched in any cluster other than the default cluster. + /// If you do not specify a cluster, the default cluster is assumed. 
This parameter is required if the service or services you are + /// describing were launched in any cluster other than the default cluster. public let cluster: String? - /// Determines whether you want to see the resource tags for the service. If TAGS is - /// specified, the tags are included in the response. If this field is omitted, tags aren't included in the - /// response. + /// Determines whether you want to see the resource tags for the service. If + /// TAGS is specified, the tags are included in the response. If this field + /// is omitted, tags aren't included in the response. public let include: [ServiceField]? - /// A list of services to describe. You may specify up to 10 services to describe in a single - /// operation. + /// A list of services to describe. You may specify up to 10 services to describe in a + /// single operation. public let services: [String] @inlinable @@ -3109,13 +3268,14 @@ extension ECS { } public struct DescribeTaskDefinitionRequest: AWSEncodableShape { - /// Determines whether to see the resource tags for the task definition. If TAGS is - /// specified, the tags are included in the response. If this field is omitted, tags aren't included in the - /// response. + /// Determines whether to see the resource tags for the task definition. If + /// TAGS is specified, the tags are included in the response. If this field + /// is omitted, tags aren't included in the response. public let include: [TaskDefinitionField]? - /// The family for the latest ACTIVE revision, family and - /// revision (family:revision) for a specific revision in the family, or full - /// Amazon Resource Name (ARN) of the task definition to describe. + /// The family for the latest ACTIVE revision, + /// family and revision (family:revision) for a + /// specific revision in the family, or full Amazon Resource Name (ARN) of the task definition to + /// describe. 
public let taskDefinition: String @inlinable @@ -3131,8 +3291,8 @@ extension ECS { } public struct DescribeTaskDefinitionResponse: AWSDecodableShape { - /// The metadata that's applied to the task definition to help you categorize and organize them. Each tag - /// consists of a key and an optional value. You define both. The following basic restrictions apply to tags: Maximum number of tags per resource - 50 For each resource, each tag key must be unique, and each tag key can have only one value. Maximum key length - 128 Unicode characters in UTF-8 Maximum value length - 256 Unicode characters in UTF-8 If your tagging schema is used across multiple services and resources, remember that other services may have restrictions on allowed characters. Generally allowed characters are: letters, numbers, and spaces representable in UTF-8, and the following characters: + - = . _ : / @. Tag keys and values are case-sensitive. Do not use aws:, AWS:, or any upper or lowercase combination of such as a prefix for either keys or values as it is reserved for Amazon Web Services use. You cannot edit or delete tag keys or values with this prefix. Tags with this prefix do not count against your tags per resource limit. + /// The metadata that's applied to the task definition to help you categorize and organize + /// them. Each tag consists of a key and an optional value. You define both. The following basic restrictions apply to tags: Maximum number of tags per resource - 50 For each resource, each tag key must be unique, and each tag key can have only one value. Maximum key length - 128 Unicode characters in UTF-8 Maximum value length - 256 Unicode characters in UTF-8 If your tagging schema is used across multiple services and resources, remember that other services may have restrictions on allowed characters. Generally allowed characters are: letters, numbers, and spaces representable in UTF-8, and the following characters: + - = . _ : / @. Tag keys and values are case-sensitive. 
Do not use aws:, AWS:, or any upper or lowercase combination of such as a prefix for either keys or values as it is reserved for Amazon Web Services use. You cannot edit or delete tag keys or values with this prefix. Tags with this prefix do not count against your tags per resource limit. public let tags: [Tag]? /// The full task definition description. public let taskDefinition: TaskDefinition? @@ -3150,16 +3310,17 @@ extension ECS { } public struct DescribeTaskSetsRequest: AWSEncodableShape { - /// The short name or full Amazon Resource Name (ARN) of the cluster that hosts the service that the task sets exist - /// in. + /// The short name or full Amazon Resource Name (ARN) of the cluster that hosts the service that the task + /// sets exist in. public let cluster: String - /// Specifies whether to see the resource tags for the task set. If TAGS is specified, the - /// tags are included in the response. If this field is omitted, tags aren't included in the - /// response. + /// Specifies whether to see the resource tags for the task set. If TAGS is + /// specified, the tags are included in the response. If this field is omitted, tags aren't + /// included in the response. public let include: [TaskSetField]? /// The short name or full Amazon Resource Name (ARN) of the service that the task sets exist in. public let service: String - /// The ID or full Amazon Resource Name (ARN) of task sets to describe. + /// The ID or full Amazon Resource Name (ARN) of task sets to + /// describe. public let taskSets: [String]? @inlinable @@ -3197,13 +3358,13 @@ extension ECS { } public struct DescribeTasksRequest: AWSEncodableShape { - /// The short name or full Amazon Resource Name (ARN) of the cluster that hosts the task or tasks to describe. - /// If you do not specify a cluster, the default cluster is assumed. This parameter is required. If you do not specify a value, the - /// default cluster is used. 
+ /// The short name or full Amazon Resource Name (ARN) of the cluster that hosts the task or tasks to + /// describe. If you do not specify a cluster, the default cluster is assumed. This parameter is required. If you do not specify a + /// value, the default cluster is used. public let cluster: String? - /// Specifies whether you want to see the resource tags for the task. If TAGS is specified, - /// the tags are included in the response. If this field is omitted, tags aren't included in the - /// response. + /// Specifies whether you want to see the resource tags for the task. If TAGS + /// is specified, the tags are included in the response. If this field is omitted, tags + /// aren't included in the response. public let include: [TaskField]? /// A list of up to 100 task IDs or full ARN entries. public let tasks: [String] @@ -3245,8 +3406,9 @@ extension ECS { public let containerPath: String? /// The path for the device on the host container instance. public let hostPath: String - /// The explicit permissions to provide to the container for the device. By default, the container has - /// permissions for read, write, and mknod for the device. + /// The explicit permissions to provide to the container for the device. By default, the + /// container has permissions for read, write, and + /// mknod for the device. public let permissions: [DeviceCgroupPermission]? @inlinable @@ -3264,11 +3426,11 @@ extension ECS { } public struct DiscoverPollEndpointRequest: AWSEncodableShape { - /// The short name or full Amazon Resource Name (ARN) of the cluster that the container instance belongs to. + /// The short name or full Amazon Resource Name (ARN) of the cluster that the container instance belongs + /// to. public let cluster: String? - /// The container instance ID or full ARN of the container instance. For more information about the - /// ARN format, see Amazon Resource Name (ARN) - /// in the Amazon ECS Developer Guide. 
+ /// The container instance ID or full ARN of the container instance. For more + /// information about the ARN format, see Amazon Resource Name (ARN) in the Amazon ECS Developer Guide. public let containerInstance: String? @inlinable @@ -3307,25 +3469,29 @@ extension ECS { } public struct DockerVolumeConfiguration: AWSEncodableShape & AWSDecodableShape { - /// If this value is true, the Docker volume is created if it doesn't already exist. This field is only used if the scope is shared. + /// If this value is true, the Docker volume is created if it doesn't already + /// exist. This field is only used if the scope is shared. public let autoprovision: Bool? - /// The Docker volume driver to use. The driver value must match the driver name provided by Docker - /// because it is used for task placement. If the driver was installed using the Docker plugin CLI, use - /// docker plugin ls to retrieve the driver name from your container instance. If the - /// driver was installed using another method, use Docker plugin discovery to retrieve the driver name. - /// This parameter maps to Driver in the docker container create command and the - /// xxdriver option to docker volume create. + /// The Docker volume driver to use. The driver value must match the driver name provided + /// by Docker because it is used for task placement. If the driver was installed using the + /// Docker plugin CLI, use docker plugin ls to retrieve the driver name from + /// your container instance. If the driver was installed using another method, use Docker + /// plugin discovery to retrieve the driver name. This parameter maps to Driver + /// in the docker container create command and the xxdriver option to docker + /// volume create. public let driver: String? /// A map of Docker driver-specific options passed through. This parameter maps to - /// DriverOpts in the docker create-volume command and the xxopt option to - /// docker volume create. 
+ /// DriverOpts in the docker create-volume command and the + /// xxopt option to docker volume create. public let driverOpts: [String: String]? - /// Custom metadata to add to your Docker volume. This parameter maps to Labels in the - /// docker container create command and the xxlabel option to docker volume create. + /// Custom metadata to add to your Docker volume. This parameter maps to + /// Labels in the docker container create command and the + /// xxlabel option to docker volume create. public let labels: [String: String]? - /// The scope for the Docker volume that determines its lifecycle. Docker volumes that are scoped to a - /// task are automatically provisioned when the task starts and destroyed when the task - /// stops. Docker volumes that are scoped as shared persist after the task stops. + /// The scope for the Docker volume that determines its lifecycle. Docker volumes that are + /// scoped to a task are automatically provisioned when the task starts and + /// destroyed when the task stops. Docker volumes that are scoped as shared + /// persist after the task stops. public let scope: Scope? @inlinable @@ -3347,9 +3513,10 @@ extension ECS { } public struct EBSTagSpecification: AWSEncodableShape & AWSDecodableShape { - /// Determines whether to propagate the tags from the task definition to 
the Amazon EBS volume. Tags can only - /// propagate to a SERVICE specified in 
ServiceVolumeConfiguration. If no value - /// is specified, the tags aren't 
propagated. + /// Determines whether to propagate the tags from the task definition to 
the Amazon EBS + /// volume. Tags can only propagate to a SERVICE specified in + /// 
ServiceVolumeConfiguration. If no value is specified, the tags aren't + /// 
propagated. public let propagateTags: PropagateTags? /// The type of volume resource. public let resourceType: EBSResourceType @@ -3379,17 +3546,18 @@ extension ECS { } public struct EFSAuthorizationConfig: AWSEncodableShape & AWSDecodableShape { - /// The Amazon EFS access point ID to use. If an access point is specified, the root directory value specified - /// in the EFSVolumeConfiguration must either be omitted or set to / which will - /// enforce the path set on the EFS access point. If an access point is used, transit encryption must be on - /// in the EFSVolumeConfiguration. For more information, see Working with Amazon EFS access points in the - /// Amazon Elastic File System User Guide. + /// The Amazon EFS access point ID to use. If an access point is specified, the root directory + /// value specified in the EFSVolumeConfiguration must either be omitted or set + /// to / which will enforce the path set on the EFS access point. If an access + /// point is used, transit encryption must be on in the EFSVolumeConfiguration. + /// For more information, see Working with Amazon EFS access + /// points in the Amazon Elastic File System User Guide. public let accessPointId: String? - /// Determines whether to use the Amazon ECS task role defined in a task definition when mounting the Amazon EFS - /// file system. If it is turned on, transit encryption must be turned on in the - /// EFSVolumeConfiguration. If this parameter is omitted, the default value of - /// DISABLED is used. For more information, see Using Amazon EFS access - /// points in the Amazon Elastic Container Service Developer Guide. + /// Determines whether to use the Amazon ECS task role defined in a task definition when + /// mounting the Amazon EFS file system. If it is turned on, transit encryption must be turned on + /// in the EFSVolumeConfiguration. If this parameter is omitted, the default + /// value of DISABLED is used. 
For more information, see Using + /// Amazon EFS access points in the Amazon Elastic Container Service Developer Guide. public let iam: EFSAuthorizationConfigIAM? @inlinable @@ -3409,20 +3577,22 @@ extension ECS { public let authorizationConfig: EFSAuthorizationConfig? /// The Amazon EFS file system ID to use. public let fileSystemId: String - /// The directory within the Amazon EFS file system to mount as the root directory inside the host. If this - /// parameter is omitted, the root of the Amazon EFS volume will be used. Specifying / will have - /// the same effect as omitting this parameter. If an EFS access point is specified in the authorizationConfig, the root directory - /// parameter must either be omitted or set to / which will enforce the path set on the - /// EFS access point. + /// The directory within the Amazon EFS file system to mount as the root directory inside the + /// host. If this parameter is omitted, the root of the Amazon EFS volume will be used. + /// Specifying / will have the same effect as omitting this parameter. If an EFS access point is specified in the authorizationConfig, the + /// root directory parameter must either be omitted or set to / which will + /// enforce the path set on the EFS access point. public let rootDirectory: String? - /// Determines whether to use encryption for Amazon EFS data in transit between the Amazon ECS host and the Amazon EFS - /// server. Transit encryption must be turned on if Amazon EFS IAM authorization is used. If this parameter is - /// omitted, the default value of DISABLED is used. For more information, see Encrypting data in - /// transit in the Amazon Elastic File System User Guide. + /// Determines whether to use encryption for Amazon EFS data in transit between the Amazon ECS host + /// and the Amazon EFS server. Transit encryption must be turned on if Amazon EFS IAM authorization + /// is used. If this parameter is omitted, the default value of DISABLED is + /// used. 
For more information, see Encrypting data in transit in + /// the Amazon Elastic File System User Guide. public let transitEncryption: EFSTransitEncryption? - /// The port to use when sending encrypted data between the Amazon ECS host and the Amazon EFS server. If you do - /// not specify a transit encryption port, it will use the port selection strategy that the Amazon EFS mount - /// helper uses. For more information, see EFS mount helper in the Amazon Elastic File System User Guide. + /// The port to use when sending encrypted data between the Amazon ECS host and the Amazon EFS + /// server. If you do not specify a transit encryption port, it will use the port selection + /// strategy that the Amazon EFS mount helper uses. For more information, see EFS mount + /// helper in the Amazon Elastic File System User Guide. public let transitEncryptionPort: Int? @inlinable @@ -3444,11 +3614,11 @@ extension ECS { } public struct EnvironmentFile: AWSEncodableShape & AWSDecodableShape { - /// The file type to use. Environment files are objects in Amazon S3. The only supported value is - /// s3. + /// The file type to use. Environment files are objects in Amazon S3. The only supported value + /// is s3. public let type: EnvironmentFileType - /// The Amazon Resource Name (ARN) of the Amazon S3 object containing the environment variable - /// file. + /// The Amazon Resource Name (ARN) of the Amazon S3 object containing the environment + /// variable file. public let value: String @inlinable @@ -3464,9 +3634,9 @@ extension ECS { } public struct EphemeralStorage: AWSEncodableShape & AWSDecodableShape { - /// The total amount, in GiB, of ephemeral storage to set for the task. The minimum supported - /// value is 21 GiB and the maximum supported value is 200 - /// GiB. + /// The total amount, in GiB, of ephemeral storage to set for the task. The minimum + /// supported value is 21 GiB and the maximum supported value is + /// 200 GiB. 
public let sizeInGiB: Int @inlinable @@ -3480,19 +3650,20 @@ extension ECS { } public struct ExecuteCommandConfiguration: AWSEncodableShape & AWSDecodableShape { - /// Specify an Key Management Service key ID to encrypt the data between the local client and the - /// container. + /// Specify an Key Management Service key ID to encrypt the data between the local client + /// and the container. public let kmsKeyId: String? - /// The log configuration for the results of the execute command actions. The logs can be sent to - /// CloudWatch Logs or an Amazon S3 bucket. When logging=OVERRIDE is specified, a - /// logConfiguration must be provided. + /// The log configuration for the results of the execute command actions. The logs can be + /// sent to CloudWatch Logs or an Amazon S3 bucket. When logging=OVERRIDE is + /// specified, a logConfiguration must be provided. public let logConfiguration: ExecuteCommandLogConfiguration? - /// The log setting to use for redirecting logs for your execute command results. The following log - /// settings are available. NONE: The execute command session is not logged. DEFAULT: The awslogs configuration in the task definition is used. - /// If no logging parameter is specified, it defaults to this value. If no awslogs log - /// driver is configured in the task definition, the output won't be logged. OVERRIDE: Specify the logging details as a part of - /// logConfiguration. If the OVERRIDE logging option is specified, the - /// logConfiguration is required. + /// The log setting to use for redirecting logs for your execute command results. The + /// following log settings are available. NONE: The execute command session is not logged. DEFAULT: The awslogs configuration in the task + /// definition is used. If no logging parameter is specified, it defaults to this + /// value. If no awslogs log driver is configured in the task + /// definition, the output won't be logged. 
OVERRIDE: Specify the logging details as a part of + /// logConfiguration. If the OVERRIDE logging option + /// is specified, the logConfiguration is required. public let logging: ExecuteCommandLogging? @inlinable @@ -3510,14 +3681,15 @@ extension ECS { } public struct ExecuteCommandLogConfiguration: AWSEncodableShape & AWSDecodableShape { - /// Determines whether to use encryption on the CloudWatch logs. If not specified, encryption will be - /// off. + /// Determines whether to use encryption on the CloudWatch logs. If not specified, + /// encryption will be off. public let cloudWatchEncryptionEnabled: Bool? /// The name of the CloudWatch log group to send logs to. The CloudWatch log group must already be created. public let cloudWatchLogGroupName: String? /// The name of the S3 bucket to send logs to. The S3 bucket must already be created. public let s3BucketName: String? - /// Determines whether to use encryption on the S3 logs. If not specified, encryption is not used. + /// Determines whether to use encryption on the S3 logs. If not specified, encryption is + /// not used. public let s3EncryptionEnabled: Bool? /// An optional folder in the S3 bucket to place logs in. public let s3KeyPrefix: String? @@ -3546,8 +3718,8 @@ extension ECS { public let cluster: String? /// The command to run on the container. public let command: String - /// The name of the container to execute the command on. A container name only needs to be specified for - /// tasks containing multiple containers. + /// The name of the container to execute the command on. A container name only needs to be + /// specified for tasks containing multiple containers. public let container: String? /// Use this flag to run your command in interactive mode. public let interactive: Bool @@ -3579,10 +3751,12 @@ extension ECS { public let containerArn: String? /// The name of the container. public let containerName: String? 
- /// Determines whether the execute command session is running in interactive mode. Amazon ECS only supports - /// initiating interactive sessions, so you must specify true for this value. + /// Determines whether the execute command session is running in interactive mode. Amazon ECS + /// only supports initiating interactive sessions, so you must specify true for + /// this value. public let interactive: Bool? - /// The details of the SSM session that was created for this instance of execute-command. + /// The details of the SSM session that was created for this instance of + /// execute-command. public let session: Session? /// The Amazon Resource Name (ARN) of the task. public let taskArn: String? @@ -3608,12 +3782,12 @@ extension ECS { } public struct FSxWindowsFileServerAuthorizationConfig: AWSEncodableShape & AWSDecodableShape { - /// The authorization credential option to use. The authorization credential options can be provided - /// using either the Amazon Resource Name (ARN) of an Secrets Manager secret or SSM Parameter Store parameter. The ARN refers to - /// the stored credentials. + /// The authorization credential option to use. The authorization credential options can + /// be provided using either the Amazon Resource Name (ARN) of an Secrets Manager secret or SSM Parameter Store + /// parameter. The ARN refers to the stored credentials. public let credentialsParameter: String - /// A fully qualified domain name hosted by an Directory Service Managed - /// Microsoft AD (Active Directory) or self-hosted AD on Amazon EC2. + /// A fully qualified domain name hosted by an Directory Service Managed Microsoft AD (Active Directory) or self-hosted AD on + /// Amazon EC2. public let domain: String @inlinable @@ -3633,8 +3807,8 @@ extension ECS { public let authorizationConfig: FSxWindowsFileServerAuthorizationConfig /// The Amazon FSx for Windows File Server file system ID to use. 
public let fileSystemId: String - /// The directory within the Amazon FSx for Windows File Server file system to mount as the root directory inside the - /// host. + /// The directory within the Amazon FSx for Windows File Server file system to mount as the root directory + /// inside the host. public let rootDirectory: String @inlinable @@ -3674,14 +3848,18 @@ extension ECS { } public struct FirelensConfiguration: AWSEncodableShape & AWSDecodableShape { - /// The options to use when configuring the log router. This field is optional and can be used to specify - /// a custom configuration file or to add additional metadata, such as the task, task definition, cluster, - /// and container instance details to the log event. If specified, the syntax to use is + /// The options to use when configuring the log router. This field is optional and can be + /// used to specify a custom configuration file or to add additional metadata, such as the + /// task, task definition, cluster, and container instance details to the log event. If + /// specified, the syntax to use is /// "options":{"enable-ecs-log-metadata":"true|false","config-file-type:"s3|file","config-file-value":"arn:aws:s3:::mybucket/fluent.conf|filepath"}. - /// For more information, see Creating a task - /// definition that uses a FireLens configuration in the Amazon Elastic Container Service Developer Guide. Tasks hosted on Fargate only support the file configuration file type. + /// For more information, see Creating + /// a task definition that uses a FireLens configuration in the + /// Amazon Elastic Container Service Developer Guide. Tasks hosted on Fargate only support the file configuration file + /// type. public let options: [String: String]? - /// The log router to use. The valid values are fluentd or fluentbit. + /// The log router to use. The valid values are fluentd or + /// fluentbit. 
public let type: FirelensConfigurationType @inlinable @@ -3697,8 +3875,8 @@ extension ECS { } public struct GetTaskProtectionRequest: AWSEncodableShape { - /// The short name or full Amazon Resource Name (ARN) of the cluster that hosts the service that the task sets exist - /// in. + /// The short name or full Amazon Resource Name (ARN) of the cluster that hosts the service that the task + /// sets exist in. public let cluster: String /// A list of up to 100 task IDs or full ARN entries. public let tasks: [String]? @@ -3718,9 +3896,10 @@ extension ECS { public struct GetTaskProtectionResponse: AWSDecodableShape { /// Any failures associated with the call. public let failures: [Failure]? - /// A list of tasks with the following information. taskArn: The task ARN. protectionEnabled: The protection status of the task. If scale-in protection is - /// turned on for a task, the value is true. Otherwise, it is - /// false. expirationDate: The epoch time when protection for the task will expire. + /// A list of tasks with the following information. taskArn: The task ARN. protectionEnabled: The protection status of the task. If scale-in + /// protection is turned on for a task, the value is true. Otherwise, + /// it is false. expirationDate: The epoch time when protection for the task will + /// expire. public let protectedTasks: [ProtectedTask]? @inlinable @@ -3736,25 +3915,29 @@ extension ECS { } public struct HealthCheck: AWSEncodableShape & AWSDecodableShape { - /// A string array representing the command that the container runs to determine if it is healthy. The - /// string array must start with CMD to run the command arguments directly, or - /// CMD-SHELL to run the command with the container's default shell. When you use the Amazon Web Services Management Console JSON panel, the Command Line Interface, or the APIs, enclose the list of commands in - /// double quotes and brackets. 
[ "CMD-SHELL", "curl -f http://localhost/ || exit 1" ] You don't include the double quotes and brackets when you use the Amazon Web Services Management Console. CMD-SHELL, curl -f http://localhost/ || exit 1 An exit code of 0 indicates success, and non-zero exit code indicates failure. For - /// more information, see HealthCheck in the docker container create command. + /// A string array representing the command that the container runs to determine if it is + /// healthy. The string array must start with CMD to run the command arguments + /// directly, or CMD-SHELL to run the command with the container's default + /// shell. When you use the Amazon Web Services Management Console JSON panel, the Command Line Interface, or the APIs, enclose the list + /// of commands in double quotes and brackets. [ "CMD-SHELL", "curl -f http://localhost/ || exit 1" ] You don't include the double quotes and brackets when you use the Amazon Web Services Management Console. CMD-SHELL, curl -f http://localhost/ || exit 1 An exit code of 0 indicates success, and non-zero exit code indicates failure. For + /// more information, see HealthCheck in the docker container create + /// command. public let command: [String] - /// The time period in seconds between each health check execution. You may specify between 5 and 300 - /// seconds. The default value is 30 seconds. + /// The time period in seconds between each health check execution. You may specify + /// between 5 and 300 seconds. The default value is 30 seconds. public let interval: Int? - /// The number of times to retry a failed health check before the container is considered unhealthy. You - /// may specify between 1 and 10 retries. The default value is 3. + /// The number of times to retry a failed health check before the container is considered + /// unhealthy. You may specify between 1 and 10 retries. The default value is 3. public let retries: Int? 
- /// The optional grace period to provide containers time to bootstrap before failed health checks count - /// towards the maximum number of retries. You can specify between 0 and 300 seconds. By default, the - /// startPeriod is off. If a health check succeeds within the startPeriod, then the container is considered - /// healthy and any subsequent failures count toward the maximum number of retries. + /// The optional grace period to provide containers time to bootstrap before failed health + /// checks count towards the maximum number of retries. You can specify between 0 and 300 + /// seconds. By default, the startPeriod is off. If a health check succeeds within the startPeriod, then the container + /// is considered healthy and any subsequent failures count toward the maximum number of + /// retries. public let startPeriod: Int? - /// The time period in seconds to wait for a health check to succeed before it is considered a failure. - /// You may specify between 2 and 60 seconds. The default value is 5. + /// The time period in seconds to wait for a health check to succeed before it is + /// considered a failure. You may specify between 2 and 60 seconds. The default value is + /// 5. public let timeout: Int? @inlinable @@ -3794,14 +3977,15 @@ extension ECS { } public struct HostVolumeProperties: AWSEncodableShape & AWSDecodableShape { - /// When the host parameter is used, specify a sourcePath to declare the path - /// on the host container instance that's presented to the container. If this parameter is empty, then the - /// Docker daemon has assigned a host path for you. If the host parameter contains a - /// sourcePath file location, then the data volume persists at the specified location on - /// the host container instance until you delete it manually. If the sourcePath value doesn't - /// exist on the host container instance, the Docker daemon creates it. If the location does exist, the - /// contents of the source path folder are exported. 
If you're using the Fargate launch type, the sourcePath parameter is not - /// supported. + /// When the host parameter is used, specify a sourcePath to + /// declare the path on the host container instance that's presented to the container. If + /// this parameter is empty, then the Docker daemon has assigned a host path for you. If the + /// host parameter contains a sourcePath file location, then + /// the data volume persists at the specified location on the host container instance until + /// you delete it manually. If the sourcePath value doesn't exist on the host + /// container instance, the Docker daemon creates it. If the location does exist, the + /// contents of the source path folder are exported. If you're using the Fargate launch type, the sourcePath + /// parameter is not supported. public let sourcePath: String? @inlinable @@ -3815,8 +3999,8 @@ extension ECS { } public struct InferenceAccelerator: AWSEncodableShape & AWSDecodableShape { - /// The Elastic Inference accelerator device name. The deviceName must also be referenced in - /// a container definition as a ResourceRequirement. + /// The Elastic Inference accelerator device name. The deviceName must also + /// be referenced in a container definition as a ResourceRequirement. public let deviceName: String /// The Elastic Inference accelerator type to use. public let deviceType: String @@ -3834,8 +4018,8 @@ extension ECS { } public struct InferenceAcceleratorOverride: AWSEncodableShape & AWSDecodableShape { - /// The Elastic Inference accelerator device name to override for the task. This parameter must match a - /// deviceName specified in the task definition. + /// The Elastic Inference accelerator device name to override for the task. This parameter + /// must match a deviceName specified in the task definition. public let deviceName: String? /// The Elastic Inference accelerator type to use. public let deviceType: String? 
@@ -3855,7 +4039,8 @@ extension ECS { public struct InstanceHealthCheckResult: AWSDecodableShape { /// The Unix timestamp for when the container instance health status last changed. public let lastStatusChange: Date? - /// The Unix timestamp for when the container instance health status was last updated. + /// The Unix timestamp for when the container instance health status was last + /// updated. public let lastUpdated: Date? /// The container instance health status. public let status: InstanceHealthCheckState? @@ -3879,25 +4064,29 @@ extension ECS { } public struct KernelCapabilities: AWSEncodableShape & AWSDecodableShape { - /// The Linux capabilities for the container that have been added to the default configuration provided - /// by Docker. This parameter maps to CapAdd in the docker container create command and the - /// --cap-add option to docker run. Tasks launched on Fargate only support adding the SYS_PTRACE kernel - /// capability. Valid values: "ALL" | "AUDIT_CONTROL" | "AUDIT_WRITE" | "BLOCK_SUSPEND" | "CHOWN" | - /// "DAC_OVERRIDE" | "DAC_READ_SEARCH" | "FOWNER" | "FSETID" | "IPC_LOCK" | "IPC_OWNER" | "KILL" | - /// "LEASE" | "LINUX_IMMUTABLE" | "MAC_ADMIN" | "MAC_OVERRIDE" | "MKNOD" | "NET_ADMIN" | - /// "NET_BIND_SERVICE" | "NET_BROADCAST" | "NET_RAW" | "SETFCAP" | "SETGID" | "SETPCAP" | "SETUID" | - /// "SYS_ADMIN" | "SYS_BOOT" | "SYS_CHROOT" | "SYS_MODULE" | "SYS_NICE" | "SYS_PACCT" | "SYS_PTRACE" | - /// "SYS_RAWIO" | "SYS_RESOURCE" | "SYS_TIME" | "SYS_TTY_CONFIG" | "SYSLOG" | + /// The Linux capabilities for the container that have been added to the default + /// configuration provided by Docker. This parameter maps to CapAdd in the + /// docker container create command and the --cap-add option to docker + /// run. Tasks launched on Fargate only support adding the SYS_PTRACE kernel + /// capability. 
Valid values: "ALL" | "AUDIT_CONTROL" | "AUDIT_WRITE" | "BLOCK_SUSPEND" | + /// "CHOWN" | "DAC_OVERRIDE" | "DAC_READ_SEARCH" | "FOWNER" | "FSETID" | "IPC_LOCK" | + /// "IPC_OWNER" | "KILL" | "LEASE" | "LINUX_IMMUTABLE" | "MAC_ADMIN" | "MAC_OVERRIDE" | + /// "MKNOD" | "NET_ADMIN" | "NET_BIND_SERVICE" | "NET_BROADCAST" | "NET_RAW" | "SETFCAP" + /// | "SETGID" | "SETPCAP" | "SETUID" | "SYS_ADMIN" | "SYS_BOOT" | "SYS_CHROOT" | + /// "SYS_MODULE" | "SYS_NICE" | "SYS_PACCT" | "SYS_PTRACE" | "SYS_RAWIO" | + /// "SYS_RESOURCE" | "SYS_TIME" | "SYS_TTY_CONFIG" | "SYSLOG" | /// "WAKE_ALARM" public let add: [String]? - /// The Linux capabilities for the container that have been removed from the default configuration - /// provided by Docker. This parameter maps to CapDrop in the docker container create command - /// and the --cap-drop option to docker run. Valid values: "ALL" | "AUDIT_CONTROL" | "AUDIT_WRITE" | "BLOCK_SUSPEND" | "CHOWN" | - /// "DAC_OVERRIDE" | "DAC_READ_SEARCH" | "FOWNER" | "FSETID" | "IPC_LOCK" | "IPC_OWNER" | "KILL" | - /// "LEASE" | "LINUX_IMMUTABLE" | "MAC_ADMIN" | "MAC_OVERRIDE" | "MKNOD" | "NET_ADMIN" | - /// "NET_BIND_SERVICE" | "NET_BROADCAST" | "NET_RAW" | "SETFCAP" | "SETGID" | "SETPCAP" | "SETUID" | - /// "SYS_ADMIN" | "SYS_BOOT" | "SYS_CHROOT" | "SYS_MODULE" | "SYS_NICE" | "SYS_PACCT" | "SYS_PTRACE" | - /// "SYS_RAWIO" | "SYS_RESOURCE" | "SYS_TIME" | "SYS_TTY_CONFIG" | "SYSLOG" | + /// The Linux capabilities for the container that have been removed from the default + /// configuration provided by Docker. This parameter maps to CapDrop in the + /// docker container create command and the --cap-drop option to docker + /// run. 
Valid values: "ALL" | "AUDIT_CONTROL" | "AUDIT_WRITE" | "BLOCK_SUSPEND" | + /// "CHOWN" | "DAC_OVERRIDE" | "DAC_READ_SEARCH" | "FOWNER" | "FSETID" | "IPC_LOCK" | + /// "IPC_OWNER" | "KILL" | "LEASE" | "LINUX_IMMUTABLE" | "MAC_ADMIN" | "MAC_OVERRIDE" | + /// "MKNOD" | "NET_ADMIN" | "NET_BIND_SERVICE" | "NET_BROADCAST" | "NET_RAW" | "SETFCAP" + /// | "SETGID" | "SETPCAP" | "SETUID" | "SYS_ADMIN" | "SYS_BOOT" | "SYS_CHROOT" | + /// "SYS_MODULE" | "SYS_NICE" | "SYS_PACCT" | "SYS_PTRACE" | "SYS_RAWIO" | + /// "SYS_RESOURCE" | "SYS_TIME" | "SYS_TTY_CONFIG" | "SYSLOG" | /// "WAKE_ALARM" public let drop: [String]? @@ -3914,11 +4103,11 @@ extension ECS { } public struct KeyValuePair: AWSEncodableShape & AWSDecodableShape { - /// The name of the key-value pair. For environment variables, this is the name of the environment - /// variable. + /// The name of the key-value pair. For environment variables, this is the name of the + /// environment variable. public let name: String? - /// The value of the key-value pair. For environment variables, this is the value of the environment - /// variable. + /// The value of the key-value pair. For environment variables, this is the value of the + /// environment variable. public let value: String? @inlinable @@ -3934,42 +4123,49 @@ extension ECS { } public struct LinuxParameters: AWSEncodableShape & AWSDecodableShape { - /// The Linux capabilities for the container that are added to or dropped from the default configuration - /// provided by Docker. For tasks that use the Fargate launch type, capabilities is supported - /// for all platform versions but the add parameter is only supported if using platform - /// version 1.4.0 or later. + /// The Linux capabilities for the container that are added to or dropped from the default + /// configuration provided by Docker. 
For tasks that use the Fargate launch type, + /// capabilities is supported for all platform versions but the + /// add parameter is only supported if using platform version 1.4.0 or + /// later. public let capabilities: KernelCapabilities? - /// Any host devices to expose to the container. This parameter maps to Devices in the - /// docker container create command and the --device option to docker run. If you're using tasks that use the Fargate launch type, the devices - /// parameter isn't supported. + /// Any host devices to expose to the container. This parameter maps to + /// Devices in the docker container create command and the + /// --device option to docker run. If you're using tasks that use the Fargate launch type, the + /// devices parameter isn't supported. public let devices: [Device]? - /// Run an init process inside the container that forwards signals and reaps processes. This - /// parameter maps to the --init option to docker run. This parameter requires version 1.25 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: sudo docker version --format '{{.Server.APIVersion}}' + /// Run an init process inside the container that forwards signals and reaps + /// processes. This parameter maps to the --init option to docker run. + /// This parameter requires version 1.25 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: sudo docker version --format '{{.Server.APIVersion}}' public let initProcessEnabled: Bool? - /// The total amount of swap memory (in MiB) a container can use. This parameter will be translated to - /// the --memory-swap option to docker run where the value would be the sum of the container - /// memory plus the maxSwap value. 
If a maxSwap value of 0 is specified, the container will not use swap. - /// Accepted values are 0 or any positive integer. If the maxSwap parameter is - /// omitted, the container will use the swap configuration for the container instance it is running on. A - /// maxSwap value must be set for the swappiness parameter to be used. If you're using tasks that use the Fargate launch type, the maxSwap - /// parameter isn't supported. If you're using tasks on Amazon Linux 2023 the swappiness parameter isn't supported. + /// The total amount of swap memory (in MiB) a container can use. This parameter will be + /// translated to the --memory-swap option to docker run where the value would + /// be the sum of the container memory plus the maxSwap value. If a maxSwap value of 0 is specified, the container will not + /// use swap. Accepted values are 0 or any positive integer. If the + /// maxSwap parameter is omitted, the container will use the swap + /// configuration for the container instance it is running on. A maxSwap value + /// must be set for the swappiness parameter to be used. If you're using tasks that use the Fargate launch type, the + /// maxSwap parameter isn't supported. If you're using tasks on Amazon Linux 2023 the swappiness parameter isn't + /// supported. public let maxSwap: Int? - /// The value for the size (in MiB) of the /dev/shm volume. This parameter maps to the - /// --shm-size option to docker run. If you are using tasks that use the Fargate launch type, the + /// The value for the size (in MiB) of the /dev/shm volume. This parameter + /// maps to the --shm-size option to docker run. If you are using tasks that use the Fargate launch type, the /// sharedMemorySize parameter is not supported. public let sharedMemorySize: Int? - /// This allows you to tune a container's memory swappiness behavior. A swappiness value of - /// 0 will cause swapping to not happen unless absolutely necessary. 
A - /// swappiness value of 100 will cause pages to be swapped very aggressively. - /// Accepted values are whole numbers between 0 and 100. If the - /// swappiness parameter is not specified, a default value of 60 is used. If - /// a value is not specified for maxSwap then this parameter is ignored. This parameter maps - /// to the --memory-swappiness option to docker run. If you're using tasks that use the Fargate launch type, the swappiness - /// parameter isn't supported. If you're using tasks on Amazon Linux 2023 the swappiness parameter isn't supported. + /// This allows you to tune a container's memory swappiness behavior. A + /// swappiness value of 0 will cause swapping to not happen + /// unless absolutely necessary. A swappiness value of 100 will + /// cause pages to be swapped very aggressively. Accepted values are whole numbers between + /// 0 and 100. If the swappiness parameter is not + /// specified, a default value of 60 is used. If a value is not specified for + /// maxSwap then this parameter is ignored. This parameter maps to the + /// --memory-swappiness option to docker run. If you're using tasks that use the Fargate launch type, the + /// swappiness parameter isn't supported. If you're using tasks on Amazon Linux 2023 the swappiness parameter isn't + /// supported. public let swappiness: Int? - /// The container path, mount options, and size (in MiB) of the tmpfs mount. This parameter maps to the - /// --tmpfs option to docker run. If you're using tasks that use the Fargate launch type, the tmpfs - /// parameter isn't supported. + /// The container path, mount options, and size (in MiB) of the tmpfs mount. This + /// parameter maps to the --tmpfs option to docker run. If you're using tasks that use the Fargate launch type, the + /// tmpfs parameter isn't supported. public let tmpfs: [Tmpfs]? 
@inlinable @@ -3995,34 +4191,35 @@ extension ECS { } public struct ListAccountSettingsRequest: AWSEncodableShape { - /// Determines whether to return the effective settings. If true, the account settings for - /// the root user or the default setting for the principalArn are returned. If - /// false, the account settings for the principalArn are returned if they're set. - /// Otherwise, no account settings are returned. + /// Determines whether to return the effective settings. If true, the account + /// settings for the root user or the default setting for the principalArn are + /// returned. If false, the account settings for the principalArn + /// are returned if they're set. Otherwise, no account settings are returned. public let effectiveSettings: Bool? - /// The maximum number of account setting results returned by ListAccountSettings in - /// paginated output. When this parameter is used, ListAccountSettings only returns - /// maxResults results in a single page along with a nextToken response - /// element. The remaining results of the initial request can be seen by sending another - /// ListAccountSettings request with the returned nextToken value. This value - /// can be between 1 and 10. If this + /// The maximum number of account setting results returned by + /// ListAccountSettings in paginated output. When this parameter is used, + /// ListAccountSettings only returns maxResults results in a + /// single page along with a nextToken response element. The remaining results + /// of the initial request can be seen by sending another ListAccountSettings + /// request with the returned nextToken value. This value can be between + /// 1 and 10. If this /// parameter isn't used, then ListAccountSettings returns up to - /// 10 results and a nextToken value if - /// applicable. + /// 10 results and a nextToken value + /// if applicable. public let maxResults: Int? /// The name of the account setting you want to list the settings for. 
public let name: SettingName? - /// The nextToken value returned from a ListAccountSettings request indicating - /// that more results are available to fulfill the request and further calls will be needed. If - /// maxResults was provided, it's possible the number of results to be fewer than - /// maxResults. This token should be treated as an opaque identifier that is only used to retrieve the next items in a list and not for other programmatic purposes. + /// The nextToken value returned from a ListAccountSettings + /// request indicating that more results are available to fulfill the request and further + /// calls will be needed. If maxResults was provided, it's possible the number + /// of results to be fewer than maxResults. This token should be treated as an opaque identifier that is only used to retrieve the next items in a list and not for other programmatic purposes. public let nextToken: String? - /// The ARN of the principal, which can be a user, role, or the root user. If this field is omitted, the - /// account settings are listed only for the authenticated user. Federated users assume the account setting of the root user and can't have explicit account settings - /// set for them. + /// The ARN of the principal, which can be a user, role, or the root user. If this field is + /// omitted, the account settings are listed only for the authenticated user. Federated users assume the account setting of the root user and can't have explicit + /// account settings set for them. public let principalArn: String? - /// The value of the account settings to filter results with. You must also specify an account setting - /// name to use this parameter. + /// The value of the account settings to filter results with. You must also specify an + /// account setting name to use this parameter. public let value: String? 
@inlinable @@ -4046,10 +4243,11 @@ extension ECS { } public struct ListAccountSettingsResponse: AWSDecodableShape { - /// The nextToken value to include in a future ListAccountSettings request. - /// When the results of a ListAccountSettings request exceed maxResults, this - /// value can be used to retrieve the next page of results. This value is null when there are - /// no more results to return. + /// The nextToken value to include in a future + /// ListAccountSettings request. When the results of a + /// ListAccountSettings request exceed maxResults, this value + /// can be used to retrieve the next page of results. This value is null when + /// there are no more results to return. public let nextToken: String? /// The account settings for the resource. public let settings: [Setting]? @@ -4069,23 +4267,25 @@ extension ECS { public struct ListAttributesRequest: AWSEncodableShape { /// The name of the attribute to filter the results with. public let attributeName: String? - /// The value of the attribute to filter results with. You must also specify an attribute name to use - /// this parameter. + /// The value of the attribute to filter results with. You must also specify an attribute + /// name to use this parameter. public let attributeValue: String? - /// The short name or full Amazon Resource Name (ARN) of the cluster to list attributes. If you do not specify a cluster, the default cluster is assumed. + /// The short name or full Amazon Resource Name (ARN) of the cluster to list attributes. + /// If you do not specify a cluster, the default cluster is assumed. public let cluster: String? - /// The maximum number of cluster results that ListAttributes returned in paginated output. - /// When this parameter is used, ListAttributes only returns maxResults results - /// in a single page along with a nextToken response element. 
The remaining results of the - /// initial request can be seen by sending another ListAttributes request with the returned - /// nextToken value. This value can be between 1 and 100. If - /// this parameter isn't used, then ListAttributes returns up to 100 results - /// and a nextToken value if applicable. + /// The maximum number of cluster results that ListAttributes returned in + /// paginated output. When this parameter is used, ListAttributes only returns + /// maxResults results in a single page along with a nextToken + /// response element. The remaining results of the initial request can be seen by sending + /// another ListAttributes request with the returned nextToken + /// value. This value can be between 1 and 100. If this + /// parameter isn't used, then ListAttributes returns up to + /// 100 results and a nextToken value if applicable. public let maxResults: Int? - /// The nextToken value returned from a ListAttributes request indicating that - /// more results are available to fulfill the request and further calls are needed. If - /// maxResults was provided, it's possible the number of results to be fewer than - /// maxResults. This token should be treated as an opaque identifier that is only used to retrieve the next items in a list and not for other programmatic purposes. + /// The nextToken value returned from a ListAttributes request + /// indicating that more results are available to fulfill the request and further calls are + /// needed. If maxResults was provided, it's possible the number of results to + /// be fewer than maxResults. This token should be treated as an opaque identifier that is only used to retrieve the next items in a list and not for other programmatic purposes. public let nextToken: String? /// The type of the target to list attributes with. 
public let targetType: TargetType @@ -4113,9 +4313,10 @@ extension ECS { public struct ListAttributesResponse: AWSDecodableShape { /// A list of attribute objects that meet the criteria of the request. public let attributes: [Attribute]? - /// The nextToken value to include in a future ListAttributes request. When the - /// results of a ListAttributes request exceed maxResults, this value can be used - /// to retrieve the next page of results. This value is null when there are no more results to + /// The nextToken value to include in a future ListAttributes + /// request. When the results of a ListAttributes request exceed + /// maxResults, this value can be used to retrieve the next page of + /// results. This value is null when there are no more results to /// return. public let nextToken: String? @@ -4132,18 +4333,19 @@ extension ECS { } public struct ListClustersRequest: AWSEncodableShape { - /// The maximum number of cluster results that ListClusters returned in paginated output. - /// When this parameter is used, ListClusters only returns maxResults results in - /// a single page along with a nextToken response element. The remaining results of the - /// initial request can be seen by sending another ListClusters request with the returned - /// nextToken value. This value can be between 1 and 100. If - /// this parameter isn't used, then ListClusters returns up to 100 results - /// and a nextToken value if applicable. + /// The maximum number of cluster results that ListClusters returned in + /// paginated output. When this parameter is used, ListClusters only returns + /// maxResults results in a single page along with a nextToken + /// response element. The remaining results of the initial request can be seen by sending + /// another ListClusters request with the returned nextToken + /// value. This value can be between 1 and 100. If this + /// parameter isn't used, then ListClusters returns up to 100 + /// results and a nextToken value if applicable. 
public let maxResults: Int? - /// The nextToken value returned from a ListClusters request indicating that - /// more results are available to fulfill the request and further calls are needed. If - /// maxResults was provided, it's possible the number of results to be fewer than - /// maxResults. This token should be treated as an opaque identifier that is only used to retrieve the next items in a list and not for other programmatic purposes. + /// The nextToken value returned from a ListClusters request + /// indicating that more results are available to fulfill the request and further calls are + /// needed. If maxResults was provided, it's possible the number of results to + /// be fewer than maxResults. This token should be treated as an opaque identifier that is only used to retrieve the next items in a list and not for other programmatic purposes. public let nextToken: String? @inlinable @@ -4159,11 +4361,13 @@ extension ECS { } public struct ListClustersResponse: AWSDecodableShape { - /// The list of full Amazon Resource Name (ARN) entries for each cluster that's associated with your account. + /// The list of full Amazon Resource Name (ARN) entries for each cluster that's associated with your + /// account. public let clusterArns: [String]? - /// The nextToken value to include in a future ListClusters request. When the - /// results of a ListClusters request exceed maxResults, this value can be used - /// to retrieve the next page of results. This value is null when there are no more results to + /// The nextToken value to include in a future ListClusters + /// request. When the results of a ListClusters request exceed + /// maxResults, this value can be used to retrieve the next page of + /// results. This value is null when there are no more results to /// return. public let nextToken: String? 
@@ -4180,31 +4384,33 @@ extension ECS { } public struct ListContainerInstancesRequest: AWSEncodableShape { - /// The short name or full Amazon Resource Name (ARN) of the cluster that hosts the container instances to list. - /// If you do not specify a cluster, the default cluster is assumed. + /// The short name or full Amazon Resource Name (ARN) of the cluster that hosts the container instances to + /// list. If you do not specify a cluster, the default cluster is assumed. public let cluster: String? - /// You can filter the results of a ListContainerInstances operation with cluster query - /// language statements. For more information, see Cluster - /// Query Language in the Amazon Elastic Container Service Developer Guide. + /// You can filter the results of a ListContainerInstances operation with + /// cluster query language statements. For more information, see Cluster Query Language in the Amazon Elastic Container Service Developer Guide. public let filter: String? - /// The maximum number of container instance results that ListContainerInstances returned in - /// paginated output. When this parameter is used, ListContainerInstances only returns - /// maxResults results in a single page along with a nextToken response - /// element. The remaining results of the initial request can be seen by sending another - /// ListContainerInstances request with the returned nextToken value. This - /// value can be between 1 and 100. If this parameter isn't used, then - /// ListContainerInstances returns up to 100 results and a - /// nextToken value if applicable. + /// The maximum number of container instance results that + /// ListContainerInstances returned in paginated output. When this + /// parameter is used, ListContainerInstances only returns + /// maxResults results in a single page along with a nextToken + /// response element. 
The remaining results of the initial request can be seen by sending + /// another ListContainerInstances request with the returned + /// nextToken value. This value can be between 1 and + /// 100. If this parameter isn't used, then + /// ListContainerInstances returns up to 100 results and + /// a nextToken value if applicable. public let maxResults: Int? - /// The nextToken value returned from a ListContainerInstances request - /// indicating that more results are available to fulfill the request and further calls are needed. If - /// maxResults was provided, it's possible the number of results to be fewer than - /// maxResults. This token should be treated as an opaque identifier that is only used to retrieve the next items in a list and not for other programmatic purposes. + /// The nextToken value returned from a ListContainerInstances + /// request indicating that more results are available to fulfill the request and further + /// calls are needed. If maxResults was provided, it's possible the number of + /// results to be fewer than maxResults. This token should be treated as an opaque identifier that is only used to retrieve the next items in a list and not for other programmatic purposes. public let nextToken: String? - /// Filters the container instances by status. For example, if you specify the DRAINING - /// status, the results include only container instances that have been set to DRAINING using - /// UpdateContainerInstancesState. If you don't specify this parameter, the default is to - /// include container instances set to all states other than INACTIVE. + /// Filters the container instances by status. For example, if you specify the + /// DRAINING status, the results include only container instances that have + /// been set to DRAINING using UpdateContainerInstancesState. If you don't specify this parameter, the + /// default is to include container instances set to all states other than + /// INACTIVE. public let status: ContainerInstanceStatus? 
@inlinable @@ -4226,13 +4432,14 @@ extension ECS { } public struct ListContainerInstancesResponse: AWSDecodableShape { - /// The list of container instances with full ARN entries for each container instance associated with - /// the specified cluster. + /// The list of container instances with full ARN entries for each container instance + /// associated with the specified cluster. public let containerInstanceArns: [String]? - /// The nextToken value to include in a future ListContainerInstances request. - /// When the results of a ListContainerInstances request exceed maxResults, this - /// value can be used to retrieve the next page of results. This value is null when there are - /// no more results to return. + /// The nextToken value to include in a future + /// ListContainerInstances request. When the results of a + /// ListContainerInstances request exceed maxResults, this + /// value can be used to retrieve the next page of results. This value is null + /// when there are no more results to return. public let nextToken: String? @inlinable @@ -4248,33 +4455,35 @@ extension ECS { } public struct ListServiceDeploymentsRequest: AWSEncodableShape { - /// The cluster that hosts the service. This can either be the cluster name or ARN. Starting - /// April 15, 2023, Amazon Web Services will not onboard new customers to Amazon Elastic - /// Inference (EI), and will help current customers migrate their workloads to options that - /// offer better price and performanceIf you don't specify a cluster, default - /// is used. + /// The cluster that hosts the service. This can either be the cluster name or ARN. + /// Starting April 15, 2023, Amazon Web Services will not onboard new customers to Amazon + /// Elastic Inference (EI), and will help current customers migrate their workloads to + /// options that offer better price and performance. If you don't specify a cluster, + /// default is used. public let cluster: String? 
- /// An optional filter you can use to narrow the results by the service creation date. If you do - /// not specify a value, the result includes all services created before the current - /// time. The - /// format is yyyy-MM-dd HH:mm:ss.SSSSSS. + /// An optional filter you can use to narrow the results by the service creation date. If + /// you do not specify a value, the result includes all services created before the current + /// time. The format is yyyy-MM-dd HH:mm:ss.SSSSSS. public let createdAt: CreatedAt? - /// The maximum number of service deployment results that ListServiceDeployments - /// returned in paginated output. When this parameter is used, - /// ListServiceDeployments only returns maxResults results in - /// a single page along with a nextToken response element. The remaining - /// results of the initial request can be seen by sending another - /// ListServiceDeployments request with the returned nextToken - /// value. This value can be between 1 and 100. If this parameter isn't used, then - /// ListServiceDeployments returns up to 20 results and a - /// nextToken value if applicable. + /// The maximum number of service deployment results that + /// ListServiceDeployments returned in paginated output. When this + /// parameter is used, ListServiceDeployments only returns + /// maxResults results in a single page along with a nextToken + /// response element. The remaining results of the initial request can be seen by sending + /// another ListServiceDeployments request with the returned + /// nextToken value. This value can be between 1 and 100. If this parameter + /// isn't used, then ListServiceDeployments returns up to 20 results and a + /// nextToken value if applicable. public let maxResults: Int? - /// The nextToken value returned from a ListServiceDeployments request indicating that more results are available to fulfill the request and further calls are needed. 
If you provided maxResults, it's possible the number of results is fewer than maxResults. + /// The nextToken value returned from a ListServiceDeployments + /// request indicating that more results are available to fulfill the request and further + /// calls are needed. If you provided maxResults, it's possible the number of + /// results is fewer than maxResults. public let nextToken: String? /// The ARN or name of the service public let service: String - /// An optional filter you can use to narrow the results. If you do not specify a status, then - /// all status values are included in the result. + /// An optional filter you can use to narrow the results. If you do not specify a status, + /// then all status values are included in the result. public let status: [ServiceDeploymentStatus]? @inlinable @@ -4298,10 +4507,14 @@ extension ECS { } public struct ListServiceDeploymentsResponse: AWSDecodableShape { - /// The nextToken value to include in a future ListServiceDeployments request. When the results of a ListServiceDeployments request exceed maxResults, this value can be used to retrieve the next page of results. This value is null when there are no more results to return. + /// The nextToken value to include in a future + /// ListServiceDeployments request. When the results of a + /// ListServiceDeployments request exceed maxResults, this + /// value can be used to retrieve the next page of results. This value is null when there + /// are no more results to return. public let nextToken: String? - /// An overview of the service deployment, including the following - /// properties: The ARN of the service deployment. The ARN of the service being deployed. The ARN of the cluster that hosts the service in the service deployment. The time that the service deployment started. The time that the service deployment completed. The service deployment status. Information about why the service deployment is in the current state. 
The ARN of the service revision that is being deployed. + /// An overview of the service deployment, including the following properties: The ARN of the service deployment. The ARN of the service being deployed. The ARN of the cluster that hosts the service in the service + /// deployment. The time that the service deployment started. The time that the service deployment completed. The service deployment status. Information about why the service deployment is in the current state. The ARN of the service revision that is being deployed. public let serviceDeployments: [ServiceDeploymentBrief]? @inlinable @@ -4317,15 +4530,17 @@ extension ECS { } public struct ListServicesByNamespaceRequest: AWSEncodableShape { - /// The maximum number of service results that ListServicesByNamespace returns in paginated - /// output. When this parameter is used, ListServicesByNamespace only returns - /// maxResults results in a single page along with a nextToken response - /// element. The remaining results of the initial request can be seen by sending another - /// ListServicesByNamespace request with the returned nextToken value. This - /// value can be between 1 and 100. If this parameter - /// isn't used, then ListServicesByNamespace returns up to - /// 10 results and a nextToken value if - /// applicable. + /// The maximum number of service results that ListServicesByNamespace + /// returns in paginated output. When this parameter is used, + /// ListServicesByNamespace only returns maxResults results in + /// a single page along with a nextToken response element. The remaining + /// results of the initial request can be seen by sending another + /// ListServicesByNamespace request with the returned + /// nextToken value. This value can be between 1 and + /// 100. If this parameter isn't used, then + /// ListServicesByNamespace returns up to + /// 10 results and a nextToken + /// value if applicable. public let maxResults: Int? 
/// The namespace name or full Amazon Resource Name (ARN) of the Cloud Map namespace to list the services in. Tasks that run in a namespace can use short names to connect /// to services in the namespace. Tasks can connect to services across all of the clusters in the namespace. @@ -4334,10 +4549,11 @@ extension ECS { /// Only the tasks that Amazon ECS services create are supported with Service Connect. /// For more information, see Service Connect in the Amazon Elastic Container Service Developer Guide. public let namespace: String - /// The nextToken value that's returned from a ListServicesByNamespace request. - /// It indicates that more results are available to fulfill the request and further calls are needed. If - /// maxResults is returned, it is possible the number of results is less than - /// maxResults. + /// The nextToken value that's returned from a + /// ListServicesByNamespace request. It indicates that more results are + /// available to fulfill the request and further calls are needed. If + /// maxResults is returned, it is possible the number of results is less + /// than maxResults. public let nextToken: String? @inlinable @@ -4355,10 +4571,11 @@ extension ECS { } public struct ListServicesByNamespaceResponse: AWSDecodableShape { - /// The nextToken value to include in a future ListServicesByNamespace request. - /// When the results of a ListServicesByNamespace request exceed maxResults, this - /// value can be used to retrieve the next page of results. When there are no more results to return, this - /// value is null. + /// The nextToken value to include in a future + /// ListServicesByNamespace request. When the results of a + /// ListServicesByNamespace request exceed maxResults, this + /// value can be used to retrieve the next page of results. When there are no more results + /// to return, this value is null. public let nextToken: String? /// The list of full ARN entries for each service that's associated with the specified /// namespace. 
@@ -4377,25 +4594,28 @@ extension ECS { } public struct ListServicesRequest: AWSEncodableShape { - /// The short name or full Amazon Resource Name (ARN) of the cluster to use when filtering the ListServices - /// results. If you do not specify a cluster, the default cluster is assumed. + /// The short name or full Amazon Resource Name (ARN) of the cluster to use when filtering the + /// ListServices results. If you do not specify a cluster, the default cluster is assumed. public let cluster: String? /// The launch type to use when filtering the ListServices results. public let launchType: LaunchType? - /// The maximum number of service results that ListServices returned in paginated output. - /// When this parameter is used, ListServices only returns maxResults results in - /// a single page along with a nextToken response element. The remaining results of the - /// initial request can be seen by sending another ListServices request with the returned - /// nextToken value. This value can be between 1 and - /// 100. If this parameter isn't used, then ListServices returns up to - /// 10 results and a nextToken value if applicable. + /// The maximum number of service results that ListServices returned in + /// paginated output. When this parameter is used, ListServices only returns + /// maxResults results in a single page along with a nextToken + /// response element. The remaining results of the initial request can be seen by sending + /// another ListServices request with the returned nextToken + /// value. This value can be between 1 and 100. If + /// this parameter isn't used, then ListServices returns up to + /// 10 results and a nextToken value if + /// applicable. public let maxResults: Int? - /// The nextToken value returned from a ListServices request indicating that - /// more results are available to fulfill the request and further calls will be needed. 
If - /// maxResults was provided, it is possible the number of results to be fewer than - /// maxResults. This token should be treated as an opaque identifier that is only used to retrieve the next items in a list and not for other programmatic purposes. + /// The nextToken value returned from a ListServices request + /// indicating that more results are available to fulfill the request and further calls will + /// be needed. If maxResults was provided, it is possible the number of results + /// to be fewer than maxResults. This token should be treated as an opaque identifier that is only used to retrieve the next items in a list and not for other programmatic purposes. public let nextToken: String? - /// The scheduling strategy to use when filtering the ListServices results. + /// The scheduling strategy to use when filtering the ListServices + /// results. public let schedulingStrategy: SchedulingStrategy? @inlinable @@ -4417,12 +4637,14 @@ extension ECS { } public struct ListServicesResponse: AWSDecodableShape { - /// The nextToken value to include in a future ListServices request. When the - /// results of a ListServices request exceed maxResults, this value can be used - /// to retrieve the next page of results. This value is null when there are no more results to + /// The nextToken value to include in a future ListServices + /// request. When the results of a ListServices request exceed + /// maxResults, this value can be used to retrieve the next page of + /// results. This value is null when there are no more results to /// return. public let nextToken: String? - /// The list of full ARN entries for each service that's associated with the specified cluster. + /// The list of full ARN entries for each service that's associated with the specified + /// cluster. public let serviceArns: [String]? 
@inlinable @@ -4438,8 +4660,9 @@ extension ECS { } public struct ListTagsForResourceRequest: AWSEncodableShape { - /// The Amazon Resource Name (ARN) that identifies the resource to list the tags for. Currently, the supported resources - /// are Amazon ECS tasks, services, task definitions, clusters, and container instances. + /// The Amazon Resource Name (ARN) that identifies the resource to list the tags for. Currently, the + /// supported resources are Amazon ECS tasks, services, task definitions, clusters, and container + /// instances. public let resourceArn: String @inlinable @@ -4468,30 +4691,36 @@ extension ECS { public struct ListTaskDefinitionFamiliesRequest: AWSEncodableShape { /// The familyPrefix is a string that's used to filter the results of - /// ListTaskDefinitionFamilies. If you specify a familyPrefix, only task - /// definition family names that begin with the familyPrefix string are returned. + /// ListTaskDefinitionFamilies. If you specify a familyPrefix, + /// only task definition family names that begin with the familyPrefix string + /// are returned. public let familyPrefix: String? - /// The maximum number of task definition family results that ListTaskDefinitionFamilies - /// returned in paginated output. When this parameter is used, ListTaskDefinitions only - /// returns maxResults results in a single page along with a nextToken response - /// element. The remaining results of the initial request can be seen by sending another - /// ListTaskDefinitionFamilies request with the returned nextToken value. - /// This value can be between 1 and 100. If this parameter isn't used, then - /// ListTaskDefinitionFamilies returns up to 100 results and a - /// nextToken value if applicable. + /// The maximum number of task definition family results that + /// ListTaskDefinitionFamilies returned in paginated output. 
When this + /// parameter is used, ListTaskDefinitions only returns maxResults + /// results in a single page along with a nextToken response element. The + /// remaining results of the initial request can be seen by sending another + /// ListTaskDefinitionFamilies request with the returned + /// nextToken value. This value can be between 1 and + /// 100. If this parameter isn't used, then + /// ListTaskDefinitionFamilies returns up to 100 results + /// and a nextToken value if applicable. public let maxResults: Int? - /// The nextToken value returned from a ListTaskDefinitionFamilies request - /// indicating that more results are available to fulfill the request and further calls will be needed. If - /// maxResults was provided, it is possible the number of results to be fewer than - /// maxResults. This token should be treated as an opaque identifier that is only used to retrieve the next items in a list and not for other programmatic purposes. + /// The nextToken value returned from a + /// ListTaskDefinitionFamilies request indicating that more results are + /// available to fulfill the request and further calls will be needed. If + /// maxResults was provided, it is possible the number of results to be + /// fewer than maxResults. This token should be treated as an opaque identifier that is only used to retrieve the next items in a list and not for other programmatic purposes. public let nextToken: String? - /// The task definition family status to filter the ListTaskDefinitionFamilies results with. - /// By default, both ACTIVE and INACTIVE task definition families are listed. If - /// this parameter is set to ACTIVE, only task definition families that have an - /// ACTIVE task definition revision are returned. If this parameter is set to - /// INACTIVE, only task definition families that do not have any ACTIVE task - /// definition revisions are returned. 
If you paginate the resulting output, be sure to keep the - /// status value constant in each subsequent request. + /// The task definition family status to filter the + /// ListTaskDefinitionFamilies results with. By default, both + /// ACTIVE and INACTIVE task definition families are listed. + /// If this parameter is set to ACTIVE, only task definition families that have + /// an ACTIVE task definition revision are returned. If this parameter is set + /// to INACTIVE, only task definition families that do not have any + /// ACTIVE task definition revisions are returned. If you paginate the + /// resulting output, be sure to keep the status value constant in each + /// subsequent request. public let status: TaskDefinitionFamilyStatus? @inlinable @@ -4511,13 +4740,14 @@ extension ECS { } public struct ListTaskDefinitionFamiliesResponse: AWSDecodableShape { - /// The list of task definition family names that match the ListTaskDefinitionFamilies - /// request. + /// The list of task definition family names that match the + /// ListTaskDefinitionFamilies request. public let families: [String]? - /// The nextToken value to include in a future ListTaskDefinitionFamilies - /// request. When the results of a ListTaskDefinitionFamilies request exceed - /// maxResults, this value can be used to retrieve the next page of results. This value is - /// null when there are no more results to return. + /// The nextToken value to include in a future + /// ListTaskDefinitionFamilies request. When the results of a + /// ListTaskDefinitionFamilies request exceed maxResults, this + /// value can be used to retrieve the next page of results. This value is null + /// when there are no more results to return. public let nextToken: String? @inlinable @@ -4533,35 +4763,38 @@ extension ECS { } public struct ListTaskDefinitionsRequest: AWSEncodableShape { - /// The full family name to filter the ListTaskDefinitions results with. 
Specifying a - /// familyPrefix limits the listed task definitions to task definition revisions that - /// belong to that family. + /// The full family name to filter the ListTaskDefinitions results with. + /// Specifying a familyPrefix limits the listed task definitions to task + /// definition revisions that belong to that family. public let familyPrefix: String? - /// The maximum number of task definition results that ListTaskDefinitions returned in - /// paginated output. When this parameter is used, ListTaskDefinitions only returns - /// maxResults results in a single page along with a nextToken response - /// element. The remaining results of the initial request can be seen by sending another - /// ListTaskDefinitions request with the returned nextToken value. This value - /// can be between 1 and 100. If this parameter isn't used, then + /// The maximum number of task definition results that ListTaskDefinitions + /// returned in paginated output. When this parameter is used, + /// ListTaskDefinitions only returns maxResults results in a + /// single page along with a nextToken response element. The remaining results + /// of the initial request can be seen by sending another ListTaskDefinitions + /// request with the returned nextToken value. This value can be between + /// 1 and 100. If this parameter isn't used, then /// ListTaskDefinitions returns up to 100 results and a /// nextToken value if applicable. public let maxResults: Int? - /// The nextToken value returned from a ListTaskDefinitions request indicating - /// that more results are available to fulfill the request and further calls will be needed. If - /// maxResults was provided, it is possible the number of results to be fewer than - /// maxResults. This token should be treated as an opaque identifier that is only used to retrieve the next items in a list and not for other programmatic purposes. 
+ /// The nextToken value returned from a ListTaskDefinitions + /// request indicating that more results are available to fulfill the request and further + /// calls will be needed. If maxResults was provided, it is possible the number + /// of results to be fewer than maxResults. This token should be treated as an opaque identifier that is only used to retrieve the next items in a list and not for other programmatic purposes. public let nextToken: String? - /// The order to sort the results in. Valid values are ASC and DESC. By - /// default, (ASC) task definitions are listed lexicographically by family name and in - /// ascending numerical order by revision so that the newest task definitions in a family are listed last. - /// Setting this parameter to DESC reverses the sort order on family name and revision. This - /// is so that the newest task definitions in a family are listed first. + /// The order to sort the results in. Valid values are ASC and + /// DESC. By default, (ASC) task definitions are listed + /// lexicographically by family name and in ascending numerical order by revision so that + /// the newest task definitions in a family are listed last. Setting this parameter to + /// DESC reverses the sort order on family name and revision. This is so + /// that the newest task definitions in a family are listed first. public let sort: SortOrder? - /// The task definition status to filter the ListTaskDefinitions results with. By default, - /// only ACTIVE task definitions are listed. By setting this parameter to - /// INACTIVE, you can view task definitions that are INACTIVE as long as an - /// active task or service still references them. If you paginate the resulting output, be sure to keep the - /// status value constant in each subsequent request. + /// The task definition status to filter the ListTaskDefinitions results + /// with. By default, only ACTIVE task definitions are listed. 
By setting this + /// parameter to INACTIVE, you can view task definitions that are + /// INACTIVE as long as an active task or service still references them. If + /// you paginate the resulting output, be sure to keep the status value + /// constant in each subsequent request. public let status: TaskDefinitionStatus? @inlinable @@ -4583,10 +4816,11 @@ extension ECS { } public struct ListTaskDefinitionsResponse: AWSDecodableShape { - /// The nextToken value to include in a future ListTaskDefinitions request. - /// When the results of a ListTaskDefinitions request exceed maxResults, this - /// value can be used to retrieve the next page of results. This value is null when there are - /// no more results to return. + /// The nextToken value to include in a future + /// ListTaskDefinitions request. When the results of a + /// ListTaskDefinitions request exceed maxResults, this value + /// can be used to retrieve the next page of results. This value is null when + /// there are no more results to return. public let nextToken: String? /// The list of task definition Amazon Resource Name (ARN) entries for the ListTaskDefinitions /// request. @@ -4605,45 +4839,52 @@ extension ECS { } public struct ListTasksRequest: AWSEncodableShape { - /// The short name or full Amazon Resource Name (ARN) of the cluster to use when filtering the ListTasks - /// results. If you do not specify a cluster, the default cluster is assumed. + /// The short name or full Amazon Resource Name (ARN) of the cluster to use when filtering the + /// ListTasks results. If you do not specify a cluster, the default cluster is assumed. public let cluster: String? - /// The container instance ID or full ARN of the container instance to use when filtering the - /// ListTasks results. Specifying a containerInstance limits the results to - /// tasks that belong to that container instance. + /// The container instance ID or full ARN of the container instance to use when + /// filtering the ListTasks results. 
Specifying a + /// containerInstance limits the results to tasks that belong to that + /// container instance. public let containerInstance: String? - /// The task desired status to use when filtering the ListTasks results. Specifying a - /// desiredStatus of STOPPED limits the results to tasks that Amazon ECS has set - /// the desired status to STOPPED. This can be useful for debugging tasks that aren't starting - /// properly or have died or finished. The default status filter is RUNNING, which shows tasks - /// that Amazon ECS has set the desired status to RUNNING. Although you can filter results based on a desired status of PENDING, this doesn't - /// return any results. Amazon ECS never sets the desired status of a task to that value (only a task's - /// lastStatus may have a value of PENDING). + /// The task desired status to use when filtering the ListTasks results. + /// Specifying a desiredStatus of STOPPED limits the results to + /// tasks that Amazon ECS has set the desired status to STOPPED. This can be useful + /// for debugging tasks that aren't starting properly or have died or finished. The default + /// status filter is RUNNING, which shows tasks that Amazon ECS has set the desired + /// status to RUNNING. Although you can filter results based on a desired status of PENDING, + /// this doesn't return any results. Amazon ECS never sets the desired status of a task to + /// that value (only a task's lastStatus may have a value of + /// PENDING). public let desiredStatus: DesiredStatus? - /// The name of the task definition family to use when filtering the ListTasks results. - /// Specifying a family limits the results to tasks that belong to that family. + /// The name of the task definition family to use when filtering the + /// ListTasks results. Specifying a family limits the results + /// to tasks that belong to that family. public let family: String? /// The launch type to use when filtering the ListTasks results. 
public let launchType: LaunchType? - /// The maximum number of task results that ListTasks returned in paginated output. When - /// this parameter is used, ListTasks only returns maxResults results in a single - /// page along with a nextToken response element. The remaining results of the initial request - /// can be seen by sending another ListTasks request with the returned nextToken - /// value. This value can be between 1 and 100. If this parameter isn't used, - /// then ListTasks returns up to 100 results and a nextToken - /// value if applicable. + /// The maximum number of task results that ListTasks returned in paginated + /// output. When this parameter is used, ListTasks only returns + /// maxResults results in a single page along with a nextToken + /// response element. The remaining results of the initial request can be seen by sending + /// another ListTasks request with the returned nextToken value. + /// This value can be between 1 and 100. If this parameter + /// isn't used, then ListTasks returns up to 100 results and + /// a nextToken value if applicable. public let maxResults: Int? - /// The nextToken value returned from a ListTasks request indicating that more - /// results are available to fulfill the request and further calls will be needed. If - /// maxResults was provided, it's possible the number of results to be fewer than - /// maxResults. This token should be treated as an opaque identifier that is only used to retrieve the next items in a list and not for other programmatic purposes. + /// The nextToken value returned from a ListTasks request + /// indicating that more results are available to fulfill the request and further calls will + /// be needed. If maxResults was provided, it's possible the number of results + /// to be fewer than maxResults. This token should be treated as an opaque identifier that is only used to retrieve the next items in a list and not for other programmatic purposes. public let nextToken: String? 
- /// The name of the service to use when filtering the ListTasks results. Specifying a - /// serviceName limits the results to tasks that belong to that service. + /// The name of the service to use when filtering the ListTasks results. + /// Specifying a serviceName limits the results to tasks that belong to that + /// service. public let serviceName: String? - /// The startedBy value to filter the task results with. Specifying a startedBy - /// value limits the results to tasks that were started with that value. When you specify startedBy as the filter, it must be the only filter that you - /// use. + /// The startedBy value to filter the task results with. Specifying a + /// startedBy value limits the results to tasks that were started with that + /// value. When you specify startedBy as the filter, it must be the only filter that + /// you use. public let startedBy: String? @inlinable @@ -4673,9 +4914,10 @@ extension ECS { } public struct ListTasksResponse: AWSDecodableShape { - /// The nextToken value to include in a future ListTasks request. When the - /// results of a ListTasks request exceed maxResults, this value can be used to - /// retrieve the next page of results. This value is null when there are no more results to + /// The nextToken value to include in a future ListTasks + /// request. When the results of a ListTasks request exceed + /// maxResults, this value can be used to retrieve the next page of + /// results. This value is null when there are no more results to /// return. public let nextToken: String? /// The list of task ARN entries for the ListTasks request. @@ -4694,24 +4936,30 @@ extension ECS { } public struct LoadBalancer: AWSEncodableShape & AWSDecodableShape { - /// The name of the container (as it appears in a container definition) to associate with the load - /// balancer. You need to specify the container name when configuring the target group for an Amazon ECS load - /// balancer. 
+ /// The name of the container (as it appears in a container definition) to associate with + /// the load balancer. You need to specify the container name when configuring the target group for an Amazon ECS + /// load balancer. public let containerName: String? - /// The port on the container to associate with the load balancer. This port must correspond to a - /// containerPort in the task definition the tasks in the service are using. For tasks - /// that use the EC2 launch type, the container instance they're launched on must allow - /// ingress traffic on the hostPort of the port mapping. + /// The port on the container to associate with the load balancer. This port must + /// correspond to a containerPort in the task definition the tasks in the + /// service are using. For tasks that use the EC2 launch type, the container + /// instance they're launched on must allow ingress traffic on the hostPort of + /// the port mapping. public let containerPort: Int? - /// The name of the load balancer to associate with the Amazon ECS service or task set. If you are using an Application Load Balancer or a Network Load Balancer the load balancer name parameter should be omitted. + /// The name of the load balancer to associate with the Amazon ECS service or task set. If you are using an Application Load Balancer or a Network Load Balancer the load balancer name parameter should be + /// omitted. public let loadBalancerName: String? - /// The full Amazon Resource Name (ARN) of the Elastic Load Balancing target group or groups associated with a service or task set. A target group ARN is only specified when using an Application Load Balancer or Network Load Balancer. For services using the ECS deployment controller, you can specify one or multiple target - /// groups. For more information, see Registering multiple target groups with a service in the Amazon Elastic Container Service Developer Guide. 
For services using the CODE_DEPLOY deployment controller, you're required to define two - /// target groups for the load balancer. For more information, see Blue/green deployment with CodeDeploy in the Amazon Elastic Container Service Developer Guide. If your service's task definition uses the awsvpc network mode, you must choose - /// ip as the target type, not instance. Do this when creating your - /// target groups because tasks that use the awsvpc network mode are associated with an - /// elastic network interface, not an Amazon EC2 instance. This network mode is required for the - /// Fargate launch type. + /// The full Amazon Resource Name (ARN) of the Elastic Load Balancing target group or groups associated with a service or + /// task set. A target group ARN is only specified when using an Application Load Balancer or Network Load Balancer. For services using the ECS deployment controller, you can specify one or + /// multiple target groups. For more information, see Registering multiple target groups with a service in + /// the Amazon Elastic Container Service Developer Guide. For services using the CODE_DEPLOY deployment controller, you're required + /// to define two target groups for the load balancer. For more information, see Blue/green deployment with CodeDeploy in the + /// Amazon Elastic Container Service Developer Guide. If your service's task definition uses the awsvpc network mode, you + /// must choose ip as the target type, not instance. Do this + /// when creating your target groups because tasks that use the awsvpc + /// network mode are associated with an elastic network interface, not an Amazon EC2 + /// instance. This network mode is required for the Fargate launch + /// type. public let targetGroupArn: String? @inlinable @@ -4731,77 +4979,95 @@ extension ECS { } public struct LogConfiguration: AWSEncodableShape & AWSDecodableShape { - /// The log driver to use for the container. 
For tasks on Fargate, the supported log drivers are awslogs, splunk, and - /// awsfirelens. For tasks hosted on Amazon EC2 instances, the supported log drivers are awslogs, - /// fluentd, gelf, json-file, journald, - /// syslog, splunk, and awsfirelens. For more information about using the awslogs log driver, see Send Amazon ECS logs to CloudWatch in the - /// Amazon Elastic Container Service Developer Guide. For more information about using the awsfirelens log driver, see Send Amazon ECS logs to - /// an Amazon Web Services service or Amazon Web Services Partner. If you have a custom driver that isn't listed, you can fork the Amazon ECS container agent project - /// that's available on GitHub and - /// customize it to work with that driver. We encourage you to submit pull requests for changes that - /// you would like to have included. However, we don't currently provide support for running modified - /// copies of this software. + /// The log driver to use for the container. For tasks on Fargate, the supported log drivers are awslogs, + /// splunk, and awsfirelens. For tasks hosted on Amazon EC2 instances, the supported log drivers are + /// awslogs, fluentd, gelf, + /// json-file, journald, syslog, + /// splunk, and awsfirelens. For more information about using the awslogs log driver, see Send + /// Amazon ECS logs to CloudWatch in the Amazon Elastic Container Service Developer Guide. For more information about using the awsfirelens log driver, see Send + /// Amazon ECS logs to an Amazon Web Services service or Amazon Web Services Partner. If you have a custom driver that isn't listed, you can fork the Amazon ECS container + /// agent project that's available + /// on GitHub and customize it to work with that driver. We encourage you to + /// submit pull requests for changes that you would like to have included. However, we + /// don't currently provide support for running modified copies of this software. 
public let logDriver: LogDriver - /// The configuration options to send to the log driver. The options you can specify depend on the log driver. Some of the options you can specify when you - /// use the awslogs log driver to route logs to Amazon CloudWatch include the following: awslogs-create-group Required: No Specify whether you want the log group to be created automatically. If this option isn't - /// specified, it defaults to false. Your IAM policy must include the logs:CreateLogGroup permission before - /// you attempt to use awslogs-create-group. awslogs-region Required: Yes Specify the Amazon Web Services Region that the awslogs log driver is to send your Docker - /// logs to. You can choose to send all of your logs from clusters in different Regions to a - /// single region in CloudWatch Logs. This is so that they're all visible in one location. Otherwise, you - /// can separate them by Region for more granularity. Make sure that the specified log group - /// exists in the Region that you specify with this option. awslogs-group Required: Yes Make sure to specify a log group that the awslogs log driver sends its log - /// streams to. awslogs-stream-prefix Required: Yes, when using the Fargate launch - /// type.Optional for the EC2 launch type, required for the - /// Fargate launch type. Use the awslogs-stream-prefix option to associate a log stream with the - /// specified prefix, the container name, and the ID of the Amazon ECS task that the container - /// belongs to. If you specify a prefix with this option, then the log stream takes the format - /// prefix-name/container-name/ecs-task-id. If you don't specify a prefix with this option, then the log stream is named after the - /// container ID that's assigned by the Docker daemon on the container instance. 
Because it's - /// difficult to trace logs back to the container that sent them with just the Docker container - /// ID (which is only available on the container instance), we recommend that you specify a - /// prefix with this option. For Amazon ECS services, you can use the service name as the prefix. Doing so, you can trace - /// log streams to the service that the container belongs to, the name of the container that - /// sent them, and the ID of the task that the container belongs to. You must specify a stream-prefix for your logs to have your logs appear in the Log pane - /// when using the Amazon ECS console. awslogs-datetime-format Required: No This option defines a multiline start pattern in Python strftime format. A - /// log message consists of a line that matches the pattern and any following lines that don’t - /// match the pattern. The matched line is the delimiter between log messages. One example of a use case for using this format is for parsing output such as a stack - /// dump, which might otherwise be logged in multiple entries. The correct pattern allows it to - /// be captured in a single entry. For more information, see awslogs-datetime-format. You cannot configure both the awslogs-datetime-format and - /// awslogs-multiline-pattern options. Multiline logging performs regular expression parsing and matching of all log - /// messages. This might have a negative impact on logging performance. awslogs-multiline-pattern Required: No This option defines a multiline start pattern that uses a regular expression. A log - /// message consists of a line that matches the pattern and any following lines that don’t - /// match the pattern. The matched line is the delimiter between log messages. For more information, see awslogs-multiline-pattern. This option is ignored if awslogs-datetime-format is also configured. You cannot configure both the awslogs-datetime-format and - /// awslogs-multiline-pattern options. 
Multiline logging performs regular expression parsing and matching of all log - /// messages. This might have a negative impact on logging performance. mode Required: No Valid values: non-blocking | blocking This option defines the delivery mode of log messages from the container to CloudWatch Logs. The - /// delivery mode you choose affects application availability when the flow of logs from - /// container to CloudWatch is interrupted. If you use the blocking mode and the flow of logs to CloudWatch is interrupted, - /// calls from container code to write to the stdout and stderr - /// streams will block. The logging thread of the application will block as a result. This may - /// cause the application to become unresponsive and lead to container healthcheck failure. If you use the non-blocking mode, the container's logs are instead stored in - /// an in-memory intermediate buffer configured with the max-buffer-size option. - /// This prevents the application from becoming unresponsive when logs cannot be sent to CloudWatch. - /// We recommend using this mode if you want to ensure service availability and are okay with - /// some log loss. For more information, see Preventing log loss with non-blocking mode in the awslogs container log - /// driver. max-buffer-size Required: No Default value: 1m When non-blocking mode is used, the max-buffer-size log option - /// controls the size of the buffer that's used for intermediate message storage. Make sure to - /// specify an adequate buffer size based on your application. When the buffer fills up, + /// The configuration options to send to the log driver. The options you can specify depend on the log driver. Some of the options you can + /// specify when you use the awslogs log driver to route logs to Amazon CloudWatch + /// include the following: awslogs-create-group Required: No Specify whether you want the log group to be created automatically. If + /// this option isn't specified, it defaults to false. 
Your IAM policy must include the logs:CreateLogGroup + /// permission before you attempt to use + /// awslogs-create-group. awslogs-region Required: Yes Specify the Amazon Web Services Region that the awslogs log driver is to + /// send your Docker logs to. You can choose to send all of your logs from + /// clusters in different Regions to a single region in CloudWatch Logs. This is so that + /// they're all visible in one location. Otherwise, you can separate them by + /// Region for more granularity. Make sure that the specified log group exists + /// in the Region that you specify with this option. awslogs-group Required: Yes Make sure to specify a log group that the awslogs log driver + /// sends its log streams to. awslogs-stream-prefix Required: Yes, when using the Fargate launch + /// type.Optional for the EC2 launch type, + /// required for the Fargate launch type. Use the awslogs-stream-prefix option to associate a log + /// stream with the specified prefix, the container name, and the ID of the + /// Amazon ECS task that the container belongs to. If you specify a prefix with this + /// option, then the log stream takes the format + /// prefix-name/container-name/ecs-task-id. If you don't specify a prefix with this option, then the log stream is + /// named after the container ID that's assigned by the Docker daemon on the + /// container instance. Because it's difficult to trace logs back to the + /// container that sent them with just the Docker container ID (which is only + /// available on the container instance), we recommend that you specify a prefix + /// with this option. For Amazon ECS services, you can use the service name as the prefix. Doing so, + /// you can trace log streams to the service that the container belongs to, the + /// name of the container that sent them, and the ID of the task that the + /// container belongs to. 
You must specify a stream-prefix for your logs to have your logs appear in + /// the Log pane when using the Amazon ECS console. awslogs-datetime-format Required: No This option defines a multiline start pattern in Python + /// strftime format. A log message consists of a line that + /// matches the pattern and any following lines that don’t match the pattern. + /// The matched line is the delimiter between log messages. One example of a use case for using this format is for parsing output such + /// as a stack dump, which might otherwise be logged in multiple entries. The + /// correct pattern allows it to be captured in a single entry. For more information, see awslogs-datetime-format. You cannot configure both the awslogs-datetime-format and + /// awslogs-multiline-pattern options. Multiline logging performs regular expression parsing and matching of + /// all log messages. This might have a negative impact on logging + /// performance. awslogs-multiline-pattern Required: No This option defines a multiline start pattern that uses a regular + /// expression. A log message consists of a line that matches the pattern and + /// any following lines that don’t match the pattern. The matched line is the + /// delimiter between log messages. For more information, see awslogs-multiline-pattern. This option is ignored if awslogs-datetime-format is also + /// configured. You cannot configure both the awslogs-datetime-format and + /// awslogs-multiline-pattern options. Multiline logging performs regular expression parsing and matching of + /// all log messages. This might have a negative impact on logging + /// performance. mode Required: No Valid values: non-blocking | blocking This option defines the delivery mode of log messages from the container + /// to CloudWatch Logs. The delivery mode you choose affects application availability when + /// the flow of logs from container to CloudWatch is interrupted. 
If you use the blocking mode and the flow of logs to CloudWatch is + /// interrupted, calls from container code to write to the stdout + /// and stderr streams will block. The logging thread of the + /// application will block as a result. This may cause the application to become + /// unresponsive and lead to container healthcheck failure. If you use the non-blocking mode, the container's logs are + /// instead stored in an in-memory intermediate buffer configured with the + /// max-buffer-size option. This prevents the application from + /// becoming unresponsive when logs cannot be sent to CloudWatch. We recommend using + /// this mode if you want to ensure service availability and are okay with some + /// log loss. For more information, see Preventing log loss with non-blocking mode in the awslogs + /// container log driver. max-buffer-size Required: No Default value: 1m When non-blocking mode is used, the + /// max-buffer-size log option controls the size of the buffer + /// that's used for intermediate message storage. Make sure to specify an + /// adequate buffer size based on your application. When the buffer fills up, /// further logs cannot be stored. Logs that cannot be stored are lost. To route logs using the splunk log router, you need to specify a - /// splunk-token and a splunk-url. When you use the awsfirelens log router to route logs to an Amazon Web Services Service or Amazon Web Services Partner Network - /// destination for log storage and analytics, you can set the log-driver-buffer-limit option - /// to limit the number of events that are buffered in memory, before being sent to the log router - /// container. It can help to resolve potential log loss issue because high throughput might result in - /// memory running out for the buffer inside of Docker. Other options you can specify when using awsfirelens to route logs depend on the - /// destination. 
When you export logs to Amazon Data Firehose, you can specify the Amazon Web Services Region with - /// region and a name for the log stream with delivery_stream. When you export logs to Amazon Kinesis Data Streams, you can specify an Amazon Web Services Region with region and a - /// data stream name with stream. When you export logs to Amazon OpenSearch Service, you can specify options like Name, Host - /// (OpenSearch Service endpoint without protocol), Port, Index, Type, - /// Aws_auth, Aws_region, Suppress_Type_Name, and - /// tls. When you export logs to Amazon S3, you can specify the bucket using the bucket option. You - /// can also specify region, total_file_size, upload_timeout, and - /// use_put_object as options. This parameter requires version 1.19 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: sudo docker version --format '{{.Server.APIVersion}}' + /// splunk-token and a splunk-url. When you use the awsfirelens log router to route logs to an Amazon Web Services Service + /// or Amazon Web Services Partner Network destination for log storage and analytics, you can set the + /// log-driver-buffer-limit option to limit the number of events that are + /// buffered in memory, before being sent to the log router container. It can help to + /// resolve potential log loss issue because high throughput might result in memory running + /// out for the buffer inside of Docker. Other options you can specify when using awsfirelens to route logs depend + /// on the destination. When you export logs to Amazon Data Firehose, you can specify the Amazon Web Services Region + /// with region and a name for the log stream with + /// delivery_stream. When you export logs to Amazon Kinesis Data Streams, you can specify an Amazon Web Services Region with + /// region and a data stream name with stream. 
When you export logs to Amazon OpenSearch Service, you can specify options like Name, + /// Host (OpenSearch Service endpoint without protocol), Port, + /// Index, Type, Aws_auth, + /// Aws_region, Suppress_Type_Name, and + /// tls. For more information, see Under the hood: FireLens for Amazon ECS Tasks. When you export logs to Amazon S3, you can specify the bucket using the bucket + /// option. You can also specify region, total_file_size, + /// upload_timeout, and use_put_object as options. This parameter requires version 1.19 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: sudo docker version --format '{{.Server.APIVersion}}' public let options: [String: String]? - /// The secrets to pass to the log configuration. For more information, see Specifying sensitive - /// data in the Amazon Elastic Container Service Developer Guide. + /// The secrets to pass to the log configuration. For more information, see Specifying + /// sensitive data in the Amazon Elastic Container Service Developer Guide. public let secretOptions: [Secret]? @inlinable @@ -4823,8 +5089,8 @@ extension ECS { public let lastStartedAt: Date? /// The last known status of the managed agent. public let lastStatus: String? - /// The name of the managed agent. When the execute command feature is turned on, the managed agent name - /// is ExecuteCommandAgent. + /// The name of the managed agent. When the execute command feature is turned on, the + /// managed agent name is ExecuteCommandAgent. public let name: ManagedAgentName? /// The reason for why the managed agent is in the state it is in. public let reason: String? 
@@ -4872,27 +5138,30 @@ extension ECS { } public struct ManagedScaling: AWSEncodableShape & AWSDecodableShape { - /// The period of time, in seconds, after a newly launched Amazon EC2 instance can contribute to CloudWatch metrics - /// for Auto Scaling group. If this parameter is omitted, the default value of 300 seconds is - /// used. + /// The period of time, in seconds, after a newly launched Amazon EC2 instance can contribute + /// to CloudWatch metrics for Auto Scaling group. If this parameter is omitted, the default value + /// of 300 seconds is used. public let instanceWarmupPeriod: Int? - /// The maximum number of Amazon EC2 instances that Amazon ECS will scale out at one time. If this parameter is - /// omitted, the default value of 10000 is used. + /// The maximum number of Amazon EC2 instances that Amazon ECS will scale out at one time. If this + /// parameter is omitted, the default value of 10000 is used. public let maximumScalingStepSize: Int? - /// The minimum number of Amazon EC2 instances that Amazon ECS will scale out at one time. The scale in process is - /// not affected by this parameter If this parameter is omitted, the default value of 1 is - /// used. When additional capacity is required, Amazon ECS will scale up the minimum scaling step size even if the - /// actual demand is less than the minimum scaling step size. If you use a capacity provider with an Auto Scaling group configured with more than one Amazon EC2 - /// instance type or Availability Zone, Amazon ECS will scale up by the exact minimum scaling step size value - /// and will ignore both the maximum scaling step size as well as the capacity demand. + /// The minimum number of Amazon EC2 instances that Amazon ECS will scale out at one time. The scale + /// in process is not affected by this parameter If this parameter is omitted, the default + /// value of 1 is used. 
When additional capacity is required, Amazon ECS will scale up the minimum scaling step + /// size even if the actual demand is less than the minimum scaling step size. If you use a capacity provider with an Auto Scaling group configured with more than + /// one Amazon EC2 instance type or Availability Zone, Amazon ECS will scale up by the exact minimum + /// scaling step size value and will ignore both the maximum scaling step size as well as + /// the capacity demand. public let minimumScalingStepSize: Int? /// Determines whether to use managed scaling for the capacity provider. public let status: ManagedScalingStatus? - /// The target capacity utilization as a percentage for the capacity provider. The specified value must - /// be greater than 0 and less than or equal to 100. For example, if you want the - /// capacity provider to maintain 10% spare capacity, then that means the utilization is 90%, so use a - /// targetCapacity of 90. The default value of 100 percent - /// results in the Amazon EC2 instances in your Auto Scaling group being completely used. + /// The target capacity utilization as a percentage for the capacity provider. The + /// specified value must be greater than 0 and less than or equal to + /// 100. For example, if you want the capacity provider to maintain 10% + /// spare capacity, then that means the utilization is 90%, so use a + /// targetCapacity of 90. The default value of + /// 100 percent results in the Amazon EC2 instances in your Auto Scaling group + /// being completely used. public let targetCapacity: Int? @inlinable @@ -4945,12 +5214,12 @@ extension ECS { public struct MountPoint: AWSEncodableShape & AWSDecodableShape { /// The path on the container to mount the host volume at. public let containerPath: String? - /// If this value is true, the container has read-only access to the volume. If this value - /// is false, then the container can write to the volume. The default value is - /// false. 
+ /// If this value is true, the container has read-only access to the volume. + /// If this value is false, then the container can write to the volume. The + /// default value is false. public let readOnly: Bool? - /// The name of the volume to mount. Must be a volume name referenced in the name parameter - /// of task definition volume. + /// The name of the volume to mount. Must be a volume name referenced in the + /// name parameter of task definition volume. public let sourceVolume: String? @inlinable @@ -4972,7 +5241,8 @@ extension ECS { public let bindIP: String? /// The port number on the container that's used with the network binding. public let containerPort: Int? - /// The port number range on the container that's bound to the dynamically mapped host port range. The following rules apply when you specify a containerPortRange: You must use either the bridge network mode or the awsvpc + /// The port number range on the container that's bound to the dynamically mapped host + /// port range. The following rules apply when you specify a containerPortRange: You must use either the bridge network mode or the awsvpc /// network mode. This parameter is available for both the EC2 and Fargate launch types. This parameter is available for both the Linux and Windows operating systems. The container instance must have at least version 1.67.0 of the container agent /// and at least version 1.67.0-1 of the ecs-init package You can specify a maximum of 100 port ranges per container. You do not specify a hostPortRange. The value of the hostPortRange is set /// as follows: For containers in a task with the awsvpc network mode, @@ -4984,8 +5254,8 @@ extension ECS { public let containerPortRange: String? /// The port number on the host that's used with the network binding. public let hostPort: Int? - /// The port number range on the host that's used with the network binding. This is assigned is assigned - /// by Docker and delivered by the Amazon ECS agent. 
+ /// The port number range on the host that's used with the network binding. This is + /// assigned is assigned by Docker and delivered by the Amazon ECS agent. public let hostPortRange: String? /// The protocol used for the network binding. public let `protocol`: TransportProtocol? @@ -5047,14 +5317,14 @@ extension ECS { } public struct PlacementConstraint: AWSEncodableShape & AWSDecodableShape { - /// A cluster query language expression to apply to the constraint. The expression can have a maximum - /// length of 2000 characters. You can't specify an expression if the constraint type is - /// distinctInstance. For more information, see Cluster - /// query language in the Amazon Elastic Container Service Developer Guide. + /// A cluster query language expression to apply to the constraint. The expression can + /// have a maximum length of 2000 characters. You can't specify an expression if the + /// constraint type is distinctInstance. For more information, see Cluster query language in the Amazon Elastic Container Service Developer Guide. public let expression: String? - /// The type of constraint. Use distinctInstance to ensure that each task in a particular - /// group is running on a different container instance. Use memberOf to restrict the selection - /// to a group of valid candidates. + /// The type of constraint. Use distinctInstance to ensure that each task in + /// a particular group is running on a different container instance. Use + /// memberOf to restrict the selection to a group of valid + /// candidates. public let type: PlacementConstraintType? @inlinable @@ -5070,19 +5340,21 @@ extension ECS { } public struct PlacementStrategy: AWSEncodableShape & AWSDecodableShape { - /// The field to apply the placement strategy against. 
For the spread placement strategy, - /// valid values are instanceId (or host, which has the same effect), or any - /// platform or custom attribute that's applied to a container instance, such as - /// attribute:ecs.availability-zone. For the binpack placement strategy, - /// valid values are cpu and memory. For the random placement - /// strategy, this field is not used. + /// The field to apply the placement strategy against. For the spread + /// placement strategy, valid values are instanceId (or host, + /// which has the same effect), or any platform or custom attribute that's applied to a + /// container instance, such as attribute:ecs.availability-zone. For the + /// binpack placement strategy, valid values are cpu and + /// memory. For the random placement strategy, this field is + /// not used. public let field: String? - /// The type of placement strategy. The random placement strategy randomly places tasks on - /// available candidates. The spread placement strategy spreads placement across available - /// candidates evenly based on the field parameter. The binpack strategy places - /// tasks on available candidates that have the least available amount of the resource that's specified - /// with the field parameter. For example, if you binpack on memory, a task is placed on the - /// instance with the least amount of remaining memory but still enough to run the task. + /// The type of placement strategy. The random placement strategy randomly + /// places tasks on available candidates. The spread placement strategy spreads + /// placement across available candidates evenly based on the field parameter. + /// The binpack strategy places tasks on available candidates that have the + /// least available amount of the resource that's specified with the field + /// parameter. For example, if you binpack on memory, a task is placed on the instance with + /// the least amount of remaining memory but still enough to run the task. 
public let type: PlacementStrategyType? @inlinable @@ -5098,11 +5370,12 @@ extension ECS { } public struct PlatformDevice: AWSEncodableShape { - /// The ID for the GPUs on the container instance. The available GPU IDs can also be obtained on the - /// container instance in the /var/lib/ecs/gpu/nvidia_gpu_info.json file. + /// The ID for the GPUs on the container instance. The available GPU IDs can also be + /// obtained on the container instance in the + /// /var/lib/ecs/gpu/nvidia_gpu_info.json file. public let id: String - /// The type of device that's available on the container instance. The only supported value is - /// GPU. + /// The type of device that's available on the container instance. The only supported + /// value is GPU. public let type: PlatformDeviceType @inlinable @@ -5118,27 +5391,29 @@ extension ECS { } public struct PortMapping: AWSEncodableShape & AWSDecodableShape { - /// The application protocol that's used for the port mapping. This parameter only applies to - /// Service Connect. We recommend that you set this parameter to be consistent with the protocol that your - /// application uses. If you set this parameter, Amazon ECS adds protocol-specific connection handling to the - /// Service Connect proxy. If you set this parameter, Amazon ECS adds protocol-specific telemetry in the Amazon ECS - /// console and CloudWatch. If you don't set a value for this parameter, then TCP is used. However, Amazon ECS doesn't add - /// protocol-specific telemetry for TCP. appProtocol is immutable in a Service Connect service. Updating this field requires a - /// service deletion and redeployment. Tasks that run in a namespace can use short names to connect + /// The application protocol that's used for the port mapping. This parameter only applies + /// to Service Connect. We recommend that you set this parameter to be consistent with the + /// protocol that your application uses. 
If you set this parameter, Amazon ECS adds + /// protocol-specific connection handling to the Service Connect proxy. If you set this + /// parameter, Amazon ECS adds protocol-specific telemetry in the Amazon ECS console and CloudWatch. If you don't set a value for this parameter, then TCP is used. However, Amazon ECS doesn't + /// add protocol-specific telemetry for TCP. appProtocol is immutable in a Service Connect service. Updating this + /// field requires a service deletion and redeployment. Tasks that run in a namespace can use short names to connect /// to services in the namespace. Tasks can connect to services across all of the clusters in the namespace. /// Tasks connect through a managed proxy container /// that collects logs and metrics for increased visibility. /// Only the tasks that Amazon ECS services create are supported with Service Connect. /// For more information, see Service Connect in the Amazon Elastic Container Service Developer Guide. public let appProtocol: ApplicationProtocol? - /// The port number on the container that's bound to the user-specified or automatically assigned host - /// port. If you use containers in a task with the awsvpc or host network mode, - /// specify the exposed ports using containerPort. If you use containers in a task with the bridge network mode and you specify a container - /// port and not a host port, your container automatically receives a host port in the ephemeral port - /// range. For more information, see hostPort. Port mappings that are automatically assigned - /// in this way do not count toward the 100 reserved ports limit of a container instance. + /// The port number on the container that's bound to the user-specified or automatically + /// assigned host port. If you use containers in a task with the awsvpc or host + /// network mode, specify the exposed ports using containerPort. 
If you use containers in a task with the bridge network mode and you + /// specify a container port and not a host port, your container automatically receives a + /// host port in the ephemeral port range. For more information, see hostPort. + /// Port mappings that are automatically assigned in this way do not count toward the 100 + /// reserved ports limit of a container instance. public let containerPort: Int? - /// The port number range on the container that's bound to the dynamically mapped host port range. The following rules apply when you specify a containerPortRange: You must use either the bridge network mode or the awsvpc + /// The port number range on the container that's bound to the dynamically mapped host + /// port range. The following rules apply when you specify a containerPortRange: You must use either the bridge network mode or the awsvpc /// network mode. This parameter is available for both the EC2 and Fargate launch types. This parameter is available for both the Linux and Windows operating systems. The container instance must have at least version 1.67.0 of the container agent /// and at least version 1.67.0-1 of the ecs-init package You can specify a maximum of 100 port ranges per container. You do not specify a hostPortRange. The value of the hostPortRange is set /// as follows: For containers in a task with the awsvpc network mode, @@ -5148,36 +5423,41 @@ extension ECS { /// 65535. A port can only be included in one port mapping per container. You cannot specify overlapping port ranges. The first port in the range must be less than last port in the range. Docker recommends that you turn off the docker-proxy in the Docker daemon config file when you have a large number of ports. For more information, see Issue #11185 on the Github website. For information about how to turn off the docker-proxy in the Docker daemon config file, see Docker daemon in the Amazon ECS Developer Guide. 
You can call DescribeTasks to view the hostPortRange which /// are the host ports that are bound to the container ports. public let containerPortRange: String? - /// The port number on the container instance to reserve for your container. If you specify a containerPortRange, leave this field empty and the value of the - /// hostPort is set as follows: For containers in a task with the awsvpc network mode, the hostPort - /// is set to the same value as the containerPort. This is a static mapping - /// strategy. For containers in a task with the bridge network mode, the Amazon ECS agent finds - /// open ports on the host and automatically binds them to the container ports. This is a dynamic - /// mapping strategy. If you use containers in a task with the awsvpc or host network mode, the - /// hostPort can either be left blank or set to the same value as the - /// containerPort. If you use containers in a task with the bridge network mode, you can specify a - /// non-reserved host port for your container port mapping, or you can omit the hostPort (or - /// set it to 0) while specifying a containerPort and your container - /// automatically receives a port in the ephemeral port range for your container instance operating system - /// and Docker version. The default ephemeral port range for Docker version 1.6.0 and later is listed on the instance under - /// /proc/sys/net/ipv4/ip_local_port_range. If this kernel parameter is unavailable, the - /// default ephemeral port range from 49153 through 65535 (Linux) or 49152 through 65535 (Windows) is used. - /// Do not attempt to specify a host port in the ephemeral port range as these are reserved for automatic - /// assignment. In general, ports below 32768 are outside of the ephemeral port range. The default reserved ports are 22 for SSH, the Docker ports 2375 and 2376, and the Amazon ECS container - /// agent ports 51678-51680. 
Any host port that was previously specified in a running task is also reserved - /// while the task is running. That is, after a task stops, the host port is released. The current reserved - /// ports are displayed in the remainingResources of DescribeContainerInstances output. A container instance can have up to 100 reserved ports - /// at a time. This number includes the default reserved ports. Automatically assigned ports aren't - /// included in the 100 reserved ports quota. + /// The port number on the container instance to reserve for your container. If you specify a containerPortRange, leave this field empty and the value + /// of the hostPort is set as follows: For containers in a task with the awsvpc network mode, the + /// hostPort is set to the same value as the + /// containerPort. This is a static mapping strategy. For containers in a task with the bridge network mode, the Amazon ECS + /// agent finds open ports on the host and automatically binds them to the container + /// ports. This is a dynamic mapping strategy. If you use containers in a task with the awsvpc or host + /// network mode, the hostPort can either be left blank or set to the same + /// value as the containerPort. If you use containers in a task with the bridge network mode, you can + /// specify a non-reserved host port for your container port mapping, or you can omit the + /// hostPort (or set it to 0) while specifying a + /// containerPort and your container automatically receives a port in the + /// ephemeral port range for your container instance operating system and Docker + /// version. The default ephemeral port range for Docker version 1.6.0 and later is listed on the + /// instance under /proc/sys/net/ipv4/ip_local_port_range. If this kernel + /// parameter is unavailable, the default ephemeral port range from 49153 through 65535 + /// (Linux) or 49152 through 65535 (Windows) is used. 
Do not attempt to specify a host port + /// in the ephemeral port range as these are reserved for automatic assignment. In general, + /// ports below 32768 are outside of the ephemeral port range. The default reserved ports are 22 for SSH, the Docker ports 2375 and 2376, and the + /// Amazon ECS container agent ports 51678-51680. Any host port that was previously specified in + /// a running task is also reserved while the task is running. That is, after a task stops, + /// the host port is released. The current reserved ports are displayed in the + /// remainingResources of DescribeContainerInstances output. A container instance can have up to 100 + /// reserved ports at a time. This number includes the default reserved ports. Automatically + /// assigned ports aren't included in the 100 reserved ports quota. public let hostPort: Int? - /// The name that's used for the port mapping. This parameter is the name that you use in the - /// serviceConnectConfiguration and the vpcLatticeConfigurations of a service. - /// The name can include up to 64 characters. The characters can include lowercase letters, numbers, - /// underscores (_), and hyphens (-). The name can't start with a hyphen. + /// The name that's used for the port mapping. This parameter is the name that you use in + /// the serviceConnectConfiguration and the + /// vpcLatticeConfigurations of a service. The name can include up to 64 + /// characters. The characters can include lowercase letters, numbers, underscores (_), and + /// hyphens (-). The name can't start with a hyphen. public let name: String? - /// The protocol used for the port mapping. Valid values are tcp and udp. The - /// default is tcp. protocol is immutable in a Service Connect service. Updating - /// this field requires a service deletion and redeployment. + /// The protocol used for the port mapping. Valid values are tcp and + /// udp. The default is tcp. protocol is + /// immutable in a Service Connect service. 
Updating this field requires a service deletion + /// and redeployment. public let `protocol`: TransportProtocol? @inlinable @@ -5203,8 +5483,8 @@ extension ECS { public struct ProtectedTask: AWSDecodableShape { /// The epoch time when protection for the task will expire. public let expirationDate: Date? - /// The protection status of the task. If scale-in protection is on for a task, the value is - /// true. Otherwise, it is false. + /// The protection status of the task. If scale-in protection is on for a task, the value + /// is true. Otherwise, it is false. public let protectionEnabled: Bool? /// The task ARN. public let taskArn: String? @@ -5226,22 +5506,22 @@ extension ECS { public struct ProxyConfiguration: AWSEncodableShape & AWSDecodableShape { /// The name of the container that will serve as the App Mesh proxy. public let containerName: String - /// The set of network configuration parameters to provide the Container Network Interface (CNI) plugin, - /// specified as key-value pairs. IgnoredUID - (Required) The user ID (UID) of the proxy container as - /// defined by the user parameter in a container definition. This is used to ensure - /// the proxy ignores its own traffic. If IgnoredGID is specified, this field can be - /// empty. IgnoredGID - (Required) The group ID (GID) of the proxy container as - /// defined by the user parameter in a container definition. This is used to ensure - /// the proxy ignores its own traffic. If IgnoredUID is specified, this field can be - /// empty. AppPorts - (Required) The list of ports that the application uses. Network - /// traffic to these ports is forwarded to the ProxyIngressPort and - /// ProxyEgressPort. ProxyIngressPort - (Required) Specifies the port that incoming traffic to - /// the AppPorts is directed to. ProxyEgressPort - (Required) Specifies the port that outgoing traffic from - /// the AppPorts is directed to. 
EgressIgnoredPorts - (Required) The egress traffic going to the specified - /// ports is ignored and not redirected to the ProxyEgressPort. It can be an empty - /// list. EgressIgnoredIPs - (Required) The egress traffic going to the specified IP - /// addresses is ignored and not redirected to the ProxyEgressPort. It can be an empty - /// list. + /// The set of network configuration parameters to provide the Container Network Interface + /// (CNI) plugin, specified as key-value pairs. IgnoredUID - (Required) The user ID (UID) of the proxy + /// container as defined by the user parameter in a container + /// definition. This is used to ensure the proxy ignores its own traffic. If + /// IgnoredGID is specified, this field can be empty. IgnoredGID - (Required) The group ID (GID) of the proxy + /// container as defined by the user parameter in a container + /// definition. This is used to ensure the proxy ignores its own traffic. If + /// IgnoredUID is specified, this field can be empty. AppPorts - (Required) The list of ports that the + /// application uses. Network traffic to these ports is forwarded to the + /// ProxyIngressPort and ProxyEgressPort. ProxyIngressPort - (Required) Specifies the port that + /// incoming traffic to the AppPorts is directed to. ProxyEgressPort - (Required) Specifies the port that + /// outgoing traffic from the AppPorts is directed to. EgressIgnoredPorts - (Required) The egress traffic going to + /// the specified ports is ignored and not redirected to the + /// ProxyEgressPort. It can be an empty list. EgressIgnoredIPs - (Required) The egress traffic going to + /// the specified IP addresses is ignored and not redirected to the + /// ProxyEgressPort. It can be an empty list. public let properties: [KeyValuePair]? /// The proxy type. The only supported value is APPMESH. public let type: ProxyConfigurationType? 
@@ -5261,63 +5541,70 @@ extension ECS { } public struct PutAccountSettingDefaultRequest: AWSEncodableShape { - /// The resource name for which to modify the account setting. The following are the valid values for the account setting name. serviceLongArnFormat - When modified, the Amazon Resource Name (ARN) and - /// resource ID format of the resource type for a specified user, role, or the root user for an - /// account is affected. The opt-in and opt-out account setting must be set for each Amazon ECS resource - /// separately. The ARN and resource ID format of a resource is defined by the opt-in status of - /// the user or role that created the resource. You must turn on this setting to use Amazon ECS features - /// such as resource tagging. taskLongArnFormat - When modified, the Amazon Resource Name (ARN) and resource - /// ID format of the resource type for a specified user, role, or the root user for an account is - /// affected. The opt-in and opt-out account setting must be set for each Amazon ECS resource - /// separately. The ARN and resource ID format of a resource is defined by the opt-in status of - /// the user or role that created the resource. You must turn on this setting to use Amazon ECS features - /// such as resource tagging. containerInstanceLongArnFormat - When modified, the Amazon Resource Name (ARN) - /// and resource ID format of the resource type for a specified user, role, or the root user for an - /// account is affected. The opt-in and opt-out account setting must be set for each Amazon ECS resource - /// separately. The ARN and resource ID format of a resource is defined by the opt-in status of - /// the user or role that created the resource. You must turn on this setting to use Amazon ECS features - /// such as resource tagging. awsvpcTrunking - When modified, the elastic network interface (ENI) limit for - /// any new container instances that support the feature is changed. 
If awsvpcTrunking - /// is turned on, any new container instances that support the feature are launched have the - /// increased ENI limits available to them. For more information, see Elastic Network Interface - /// Trunking in the Amazon Elastic Container Service Developer Guide. containerInsights - Container Insights with enhanced observability provides - /// all the Container Insights metrics, plus additional task and container metrics. - /// This version supports enhanced observability for Amazon ECS clusters using the Amazon EC2 - /// and Fargate launch types. After you configure Container Insights with enhanced - /// observability on Amazon ECS, Container Insights auto-collects detailed infrastructure - /// telemetry from the cluster level down to the container level in your environment and - /// displays these critical performance data in curated dashboards removing the - /// heavy lifting in observability set-up. To use Container Insights with enhanced observability, set the + /// The resource name for which to modify the account setting. The following are the valid values for the account setting name. serviceLongArnFormat - When modified, the Amazon Resource Name + /// (ARN) and resource ID format of the resource type for a specified user, role, or + /// the root user for an account is affected. The opt-in and opt-out account setting + /// must be set for each Amazon ECS resource separately. The ARN and resource ID format + /// of a resource is defined by the opt-in status of the user or role that created + /// the resource. You must turn on this setting to use Amazon ECS features such as + /// resource tagging. taskLongArnFormat - When modified, the Amazon Resource Name (ARN) + /// and resource ID format of the resource type for a specified user, role, or the + /// root user for an account is affected. The opt-in and opt-out account setting must + /// be set for each Amazon ECS resource separately. 
The ARN and resource ID format of a + /// resource is defined by the opt-in status of the user or role that created the + /// resource. You must turn on this setting to use Amazon ECS features such as resource + /// tagging. containerInstanceLongArnFormat - When modified, the Amazon + /// Resource Name (ARN) and resource ID format of the resource type for a specified + /// user, role, or the root user for an account is affected. The opt-in and opt-out + /// account setting must be set for each Amazon ECS resource separately. The ARN and + /// resource ID format of a resource is defined by the opt-in status of the user or + /// role that created the resource. You must turn on this setting to use Amazon ECS + /// features such as resource tagging. awsvpcTrunking - When modified, the elastic network interface + /// (ENI) limit for any new container instances that support the feature is changed. + /// If awsvpcTrunking is turned on, any new container instances that + /// support the feature are launched and have the increased ENI limits available to + /// them. For more information, see Elastic + /// Network Interface Trunking in the Amazon Elastic Container Service Developer Guide. containerInsights - Container Insights with enhanced + /// observability provides all the Container Insights metrics, plus additional task + /// and container metrics. This version supports enhanced observability for Amazon ECS + /// clusters using the Amazon EC2 and Fargate launch types. After you configure + /// Container Insights with enhanced observability on Amazon ECS, Container Insights + /// auto-collects detailed infrastructure telemetry from the cluster level down to + /// the container level in your environment and displays these critical performance + /// data in curated dashboards removing the heavy lifting in observability set-up. To use Container Insights with enhanced observability, set the + /// containerInsights account setting to + /// enhanced. 
To use Container Insights, set the containerInsights account - /// setting to enabled. For more information, see Monitor Amazon ECS containers using Container Insights with enhanced observability in the Amazon Elastic Container Service Developer Guide. dualStackIPv6 - When turned on, when using a VPC in dual stack mode, your tasks - /// using the awsvpc network mode can have an IPv6 address assigned. For more - /// information on using IPv6 with tasks launched on Amazon EC2 instances, see Using a VPC in dual-stack mode. For more information on using IPv6 with tasks - /// launched on Fargate, see Using a VPC in dual-stack mode. fargateFIPSMode - If you specify fargateFIPSMode, Fargate FIPS - /// 140 compliance is affected. fargateTaskRetirementWaitPeriod - When Amazon Web Services determines that a security or - /// infrastructure update is needed for an Amazon ECS task hosted on Fargate, the tasks need to be - /// stopped and new tasks launched to replace them. Use - /// fargateTaskRetirementWaitPeriod to configure the wait time to retire a - /// Fargate task. For information about the Fargate tasks maintenance, see Amazon Web Services - /// Fargate task maintenance in the Amazon ECS Developer - /// Guide. tagResourceAuthorization - Amazon ECS is introducing tagging authorization for - /// resource creation. Users must have permissions for actions that create the resource, such as - /// ecsCreateCluster. If tags are specified when you create a resource, Amazon Web Services - /// performs additional authorization to verify if users or roles have permissions to create tags. - /// Therefore, you must grant explicit permissions to use the ecs:TagResource action. - /// For more information, see Grant permission - /// to tag resources on creation in the Amazon ECS Developer - /// Guide. guardDutyActivate - The guardDutyActivate parameter is read-only in Amazon ECS and indicates whether + /// setting to enabled. 
For more information, see Monitor Amazon ECS containers using Container Insights with enhanced + /// observability in the Amazon Elastic Container Service Developer Guide. dualStackIPv6 - When turned on, when using a VPC in dual stack + /// mode, your tasks using the awsvpc network mode can have an IPv6 + /// address assigned. For more information on using IPv6 with tasks launched on + /// Amazon EC2 instances, see Using a VPC in dual-stack mode. For more information on using IPv6 + /// with tasks launched on Fargate, see Using a VPC in dual-stack mode. fargateFIPSMode - If you specify fargateFIPSMode, + /// Fargate FIPS 140 compliance is affected. fargateTaskRetirementWaitPeriod - When Amazon Web Services determines that a + /// security or infrastructure update is needed for an Amazon ECS task hosted on + /// Fargate, the tasks need to be stopped and new tasks launched to replace them. + /// Use fargateTaskRetirementWaitPeriod to configure the wait time to + /// retire a Fargate task. For information about the Fargate tasks maintenance, + /// see Amazon Web Services Fargate + /// task maintenance in the Amazon ECS Developer + /// Guide. tagResourceAuthorization - Amazon ECS is introducing tagging + /// authorization for resource creation. Users must have permissions for actions + /// that create the resource, such as ecsCreateCluster. If tags are + /// specified when you create a resource, Amazon Web Services performs additional authorization to + /// verify if users or roles have permissions to create tags. Therefore, you must + /// grant explicit permissions to use the ecs:TagResource action. For + /// more information, see Grant permission to tag resources on creation in the + /// Amazon ECS Developer Guide. guardDutyActivate - The guardDutyActivate parameter is read-only in Amazon ECS and indicates whether /// Amazon ECS Runtime Monitoring is enabled or disabled by your security administrator in your /// Amazon ECS account. 
Amazon GuardDuty controls this account setting on your behalf. For more information, see Protecting Amazon ECS workloads with Amazon ECS Runtime Monitoring. public let name: SettingName /// The account setting value for the specified principal ARN. Accepted values are /// enabled, disabled, on, enhanced, /// and off. When you specify fargateTaskRetirementWaitPeriod for the - /// name, the following are the valid values: 0 - Amazon Web Services sends the notification, and immediately retires the affected - /// tasks. 7 - Amazon Web Services sends the notification, and waits 7 calendar days to retire the - /// tasks. 14 - Amazon Web Services sends the notification, and waits 14 calendar days to retire the - /// tasks. + /// name, the following are the valid values: 0 - Amazon Web Services sends the notification, and immediately retires the + /// affected tasks. 7 - Amazon Web Services sends the notification, and waits 7 calendar days to + /// retire the tasks. 14 - Amazon Web Services sends the notification, and waits 14 calendar days to + /// retire the tasks. public let value: String @inlinable @@ -5347,69 +5634,79 @@ extension ECS { } public struct PutAccountSettingRequest: AWSEncodableShape { - /// The Amazon ECS account setting name to modify. The following are the valid values for the account setting name. serviceLongArnFormat - When modified, the Amazon Resource Name (ARN) and - /// resource ID format of the resource type for a specified user, role, or the root user for an - /// account is affected. The opt-in and opt-out account setting must be set for each Amazon ECS resource - /// separately. The ARN and resource ID format of a resource is defined by the opt-in status of - /// the user or role that created the resource. You must turn on this setting to use Amazon ECS features - /// such as resource tagging. 
taskLongArnFormat - When modified, the Amazon Resource Name (ARN) and resource - /// ID format of the resource type for a specified user, role, or the root user for an account is - /// affected. The opt-in and opt-out account setting must be set for each Amazon ECS resource - /// separately. The ARN and resource ID format of a resource is defined by the opt-in status of - /// the user or role that created the resource. You must turn on this setting to use Amazon ECS features - /// such as resource tagging. containerInstanceLongArnFormat - When modified, the Amazon Resource Name (ARN) - /// and resource ID format of the resource type for a specified user, role, or the root user for an - /// account is affected. The opt-in and opt-out account setting must be set for each Amazon ECS resource - /// separately. The ARN and resource ID format of a resource is defined by the opt-in status of - /// the user or role that created the resource. You must turn on this setting to use Amazon ECS features - /// such as resource tagging. awsvpcTrunking - When modified, the elastic network interface (ENI) limit for - /// any new container instances that support the feature is changed. If awsvpcTrunking - /// is turned on, any new container instances that support the feature are launched have the - /// increased ENI limits available to them. For more information, see Elastic Network Interface - /// Trunking in the Amazon Elastic Container Service Developer Guide. containerInsights - Container Insights with enhanced observability provides - /// all the Container Insights metrics, plus additional task and container metrics. - /// This version supports enhanced observability for Amazon ECS clusters using the Amazon EC2 - /// and Fargate launch types. 
After you configure Container Insights with enhanced - /// observability on Amazon ECS, Container Insights auto-collects detailed infrastructure - /// telemetry from the cluster level down to the container level in your environment and - /// displays these critical performance data in curated dashboards removing the - /// heavy lifting in observability set-up. To use Container Insights with enhanced observability, set the + /// The Amazon ECS account setting name to modify. The following are the valid values for the account setting name. serviceLongArnFormat - When modified, the Amazon Resource Name + /// (ARN) and resource ID format of the resource type for a specified user, role, or + /// the root user for an account is affected. The opt-in and opt-out account setting + /// must be set for each Amazon ECS resource separately. The ARN and resource ID format + /// of a resource is defined by the opt-in status of the user or role that created + /// the resource. You must turn on this setting to use Amazon ECS features such as + /// resource tagging. taskLongArnFormat - When modified, the Amazon Resource Name (ARN) + /// and resource ID format of the resource type for a specified user, role, or the + /// root user for an account is affected. The opt-in and opt-out account setting must + /// be set for each Amazon ECS resource separately. The ARN and resource ID format of a + /// resource is defined by the opt-in status of the user or role that created the + /// resource. You must turn on this setting to use Amazon ECS features such as resource + /// tagging. fargateFIPSMode - When turned on, you can run Fargate workloads + /// in a manner that is compliant with Federal Information Processing Standard + /// (FIPS-140). For more information, see Fargate + /// Federal Information Processing Standard (FIPS-140). 
containerInstanceLongArnFormat - When modified, the Amazon + /// Resource Name (ARN) and resource ID format of the resource type for a specified + /// user, role, or the root user for an account is affected. The opt-in and opt-out + /// account setting must be set for each Amazon ECS resource separately. The ARN and + /// resource ID format of a resource is defined by the opt-in status of the user or + /// role that created the resource. You must turn on this setting to use Amazon ECS + /// features such as resource tagging. awsvpcTrunking - When modified, the elastic network interface + /// (ENI) limit for any new container instances that support the feature is changed. + /// If awsvpcTrunking is turned on, any new container instances that + /// support the feature are launched and have the increased ENI limits available to + /// them. For more information, see Elastic + /// Network Interface Trunking in the Amazon Elastic Container Service Developer Guide. containerInsights - Container Insights with enhanced + /// observability provides all the Container Insights metrics, plus additional task + /// and container metrics. This version supports enhanced observability for Amazon ECS + /// clusters using the Amazon EC2 and Fargate launch types. After you configure + /// Container Insights with enhanced observability on Amazon ECS, Container Insights + /// auto-collects detailed infrastructure telemetry from the cluster level down to + /// the container level in your environment and displays these critical performance + /// data in curated dashboards removing the heavy lifting in observability set-up. To use Container Insights with enhanced observability, set the + /// containerInsights account setting to + /// enhanced. To use Container Insights, set the containerInsights account + /// setting to enabled. For more information, see Monitor Amazon ECS containers using Container Insights with enhanced + /// observability in the Amazon Elastic Container Service Developer Guide. dualStackIPv6 - When turned on, when using a VPC in dual stack + /// mode, your tasks using the awsvpc network mode can have an IPv6 + /// address assigned. 
dualStackIPv6 - When turned on, when using a VPC in dual stack mode, your tasks - /// using the awsvpc network mode can have an IPv6 address assigned. For more - /// information on using IPv6 with tasks launched on Amazon EC2 instances, see Using a VPC in dual-stack mode. For more information on using IPv6 with tasks - /// launched on Fargate, see Using a VPC in dual-stack mode. fargateTaskRetirementWaitPeriod - When Amazon Web Services determines that a security or - /// infrastructure update is needed for an Amazon ECS task hosted on Fargate, the tasks need to be - /// stopped and new tasks launched to replace them. Use - /// fargateTaskRetirementWaitPeriod to configure the wait time to retire a - /// Fargate task. For information about the Fargate tasks maintenance, see Amazon Web Services - /// Fargate task maintenance in the Amazon ECS Developer - /// Guide. tagResourceAuthorization - Amazon ECS is introducing tagging authorization for - /// resource creation. Users must have permissions for actions that create the resource, such as - /// ecsCreateCluster. If tags are specified when you create a resource, Amazon Web Services - /// performs additional authorization to verify if users or roles have permissions to create tags. - /// Therefore, you must grant explicit permissions to use the ecs:TagResource action. - /// For more information, see Grant permission - /// to tag resources on creation in the Amazon ECS Developer - /// Guide. guardDutyActivate - The guardDutyActivate parameter is read-only in Amazon ECS and indicates whether + /// enhanced. To use Container Insights, set the containerInsights account + /// setting to enabled. For more information, see Monitor Amazon ECS containers using Container Insights with enhanced + /// observability in the Amazon Elastic Container Service Developer Guide. dualStackIPv6 - When turned on, when using a VPC in dual stack + /// mode, your tasks using the awsvpc network mode can have an IPv6 + /// address assigned. 
For more information on using IPv6 with tasks launched on + /// Amazon EC2 instances, see Using a VPC in dual-stack mode. For more information on using IPv6 + /// with tasks launched on Fargate, see Using a VPC in dual-stack mode. fargateTaskRetirementWaitPeriod - When Amazon Web Services determines that a + /// security or infrastructure update is needed for an Amazon ECS task hosted on + /// Fargate, the tasks need to be stopped and new tasks launched to replace them. + /// Use fargateTaskRetirementWaitPeriod to configure the wait time to + /// retire a Fargate task. For information about the Fargate tasks maintenance, + /// see Amazon Web Services Fargate + /// task maintenance in the Amazon ECS Developer + /// Guide. tagResourceAuthorization - Amazon ECS is introducing tagging + /// authorization for resource creation. Users must have permissions for actions + /// that create the resource, such as ecsCreateCluster. If tags are + /// specified when you create a resource, Amazon Web Services performs additional authorization to + /// verify if users or roles have permissions to create tags. Therefore, you must + /// grant explicit permissions to use the ecs:TagResource action. For + /// more information, see Grant permission to tag resources on creation in the + /// Amazon ECS Developer Guide. guardDutyActivate - The guardDutyActivate parameter is read-only in Amazon ECS and indicates whether /// Amazon ECS Runtime Monitoring is enabled or disabled by your security administrator in your /// Amazon ECS account. Amazon GuardDuty controls this account setting on your behalf. For more information, see Protecting Amazon ECS workloads with Amazon ECS Runtime Monitoring. public let name: SettingName - /// The ARN of the principal, which can be a user, role, or the root user. If you specify the root user, it - /// modifies the account setting for all users, roles, and the root user of the account unless a user or role - /// explicitly overrides these settings. 
If this field is omitted, the setting is changed only for the - /// authenticated user. You must use the root user when you set the Fargate wait time - /// (fargateTaskRetirementWaitPeriod). Federated users assume the account setting of the root user and can't have explicit account settings - /// set for them. + /// The ARN of the principal, which can be a user, role, or the root user. If you specify + /// the root user, it modifies the account setting for all users, roles, and the root user of the + /// account unless a user or role explicitly overrides these settings. If this field is + /// omitted, the setting is changed only for the authenticated user. You must use the root user when you set the Fargate wait time + /// (fargateTaskRetirementWaitPeriod). Federated users assume the account setting of the root user and can't have explicit + /// account settings set for them. public let principalArn: String? /// The account setting value for the specified principal ARN. Accepted values are - /// enabled, disabled, enhanced, - /// on, and off. When you specify fargateTaskRetirementWaitPeriod for the - /// name, the following are the valid values: 0 - Amazon Web Services sends the notification, and immediately retires the affected - /// tasks. 7 - Amazon Web Services sends the notification, and waits 7 calendar days to retire the - /// tasks. 14 - Amazon Web Services sends the notification, and waits 14 calendar days to retire the - /// tasks. + /// enabled, disabled, enhanced, on, + /// and off. When you specify fargateTaskRetirementWaitPeriod for the + /// name, the following are the valid values: 0 - Amazon Web Services sends the notification, and immediately retires the + /// affected tasks. 7 - Amazon Web Services sends the notification, and waits 7 calendar days to + /// retire the tasks. 14 - Amazon Web Services sends the notification, and waits 14 calendar days to + /// retire the tasks. 
public let value: String @inlinable @@ -5441,11 +5738,11 @@ extension ECS { } public struct PutAttributesRequest: AWSEncodableShape { - /// The attributes to apply to your resource. You can specify up to 10 custom attributes for each - /// resource. You can specify up to 10 attributes in a single call. + /// The attributes to apply to your resource. You can specify up to 10 custom attributes + /// for each resource. You can specify up to 10 attributes in a single call. public let attributes: [Attribute] - /// The short name or full Amazon Resource Name (ARN) of the cluster that contains the resource to apply attributes. - /// If you do not specify a cluster, the default cluster is assumed. + /// The short name or full Amazon Resource Name (ARN) of the cluster that contains the resource to apply + /// attributes. If you do not specify a cluster, the default cluster is assumed. public let cluster: String? @inlinable @@ -5475,24 +5772,27 @@ extension ECS { } public struct PutClusterCapacityProvidersRequest: AWSEncodableShape { - /// The name of one or more capacity providers to associate with the cluster. If specifying a capacity provider that uses an Auto Scaling group, the capacity provider must already - /// be created. New capacity providers can be created with the CreateCapacityProvider - /// API operation. To use a Fargate capacity provider, specify either the FARGATE or - /// FARGATE_SPOT capacity providers. The Fargate capacity providers are available to all - /// accounts and only need to be associated with a cluster to be used. + /// The name of one or more capacity providers to associate with the cluster. If specifying a capacity provider that uses an Auto Scaling group, the capacity + /// provider must already be created. New capacity providers can be created with the CreateCapacityProvider API operation. To use a Fargate capacity provider, specify either the FARGATE or + /// FARGATE_SPOT capacity providers. 
The Fargate capacity providers are + /// available to all accounts and only need to be associated with a cluster to be + /// used. public let capacityProviders: [String] - /// The short name or full Amazon Resource Name (ARN) of the cluster to modify the capacity provider settings for. If you - /// don't specify a cluster, the default cluster is assumed. + /// The short name or full Amazon Resource Name (ARN) of the cluster to modify the capacity provider + /// settings for. If you don't specify a cluster, the default cluster is assumed. public let cluster: String - /// The capacity provider strategy to use by default for the cluster. When creating a service or running a task on a cluster, if no capacity provider or launch type is - /// specified then the default capacity provider strategy for the cluster is used. A capacity provider strategy consists of one or more capacity providers along with the - /// base and weight to assign to them. A capacity provider must be associated - /// with the cluster to be used in a capacity provider strategy. The PutClusterCapacityProviders API is used to associate a capacity provider with a cluster. - /// Only capacity providers with an ACTIVE or UPDATING status can be used. If specifying a capacity provider that uses an Auto Scaling group, the capacity provider must already - /// be created. New capacity providers can be created with the CreateCapacityProvider - /// API operation. To use a Fargate capacity provider, specify either the FARGATE or - /// FARGATE_SPOT capacity providers. The Fargate capacity providers are available to all - /// accounts and only need to be associated with a cluster to be used. + /// The capacity provider strategy to use by default for the cluster. When creating a service or running a task on a cluster, if no capacity provider or + /// launch type is specified then the default capacity provider strategy for the cluster is + /// used. 
A capacity provider strategy consists of one or more capacity providers along with the + /// base and weight to assign to them. A capacity provider + /// must be associated with the cluster to be used in a capacity provider strategy. The + /// PutClusterCapacityProviders API is used to associate a capacity provider + /// with a cluster. Only capacity providers with an ACTIVE or + /// UPDATING status can be used. If specifying a capacity provider that uses an Auto Scaling group, the capacity + /// provider must already be created. New capacity providers can be created with the CreateCapacityProvider API operation. To use a Fargate capacity provider, specify either the FARGATE or + /// FARGATE_SPOT capacity providers. The Fargate capacity providers are + /// available to all accounts and only need to be associated with a cluster to be + /// used. public let defaultCapacityProviderStrategy: [CapacityProviderStrategyItem] @inlinable @@ -5532,29 +5832,29 @@ extension ECS { public struct RegisterContainerInstanceRequest: AWSEncodableShape { /// The container instance attributes that this container instance supports. public let attributes: [Attribute]? - /// The short name or full Amazon Resource Name (ARN) of the cluster to register your container instance with. - /// If you do not specify a cluster, the default cluster is assumed. + /// The short name or full Amazon Resource Name (ARN) of the cluster to register your container instance + /// with. If you do not specify a cluster, the default cluster is assumed. public let cluster: String? /// The ARN of the container instance (if it was previously registered). public let containerInstanceArn: String? - /// The instance identity document for the EC2 instance to register. This document can be found by - /// running the following command from the instance: curl + /// The instance identity document for the EC2 instance to register. 
This document can be + /// found by running the following command from the instance: curl /// http://169.254.169.254/latest/dynamic/instance-identity/document/ public let instanceIdentityDocument: String? - /// The instance identity document signature for the EC2 instance to register. This signature can be - /// found by running the following command from the instance: curl + /// The instance identity document signature for the EC2 instance to register. This + /// signature can be found by running the following command from the instance: curl /// http://169.254.169.254/latest/dynamic/instance-identity/signature/ public let instanceIdentityDocumentSignature: String? - /// The devices that are available on the container instance. The only supported device type is a - /// GPU. + /// The devices that are available on the container instance. The only supported device + /// type is a GPU. public let platformDevices: [PlatformDevice]? - /// The metadata that you apply to the container instance to help you categorize and organize them. Each - /// tag consists of a key and an optional value. You define both. The following basic restrictions apply to tags: Maximum number of tags per resource - 50 For each resource, each tag key must be unique, and each tag key can have only one value. Maximum key length - 128 Unicode characters in UTF-8 Maximum value length - 256 Unicode characters in UTF-8 If your tagging schema is used across multiple services and resources, remember that other services may have restrictions on allowed characters. Generally allowed characters are: letters, numbers, and spaces representable in UTF-8, and the following characters: + - = . _ : / @. Tag keys and values are case-sensitive. Do not use aws:, AWS:, or any upper or lowercase combination of such as a prefix for either keys or values as it is reserved for Amazon Web Services use. You cannot edit or delete tag keys or values with this prefix. 
Tags with this prefix do not count against your tags per resource limit. + /// The metadata that you apply to the container instance to help you categorize and + /// organize them. Each tag consists of a key and an optional value. You define both. The following basic restrictions apply to tags: Maximum number of tags per resource - 50 For each resource, each tag key must be unique, and each tag key can have only one value. Maximum key length - 128 Unicode characters in UTF-8 Maximum value length - 256 Unicode characters in UTF-8 If your tagging schema is used across multiple services and resources, remember that other services may have restrictions on allowed characters. Generally allowed characters are: letters, numbers, and spaces representable in UTF-8, and the following characters: + - = . _ : / @. Tag keys and values are case-sensitive. Do not use aws:, AWS:, or any upper or lowercase combination of such as a prefix for either keys or values as it is reserved for Amazon Web Services use. You cannot edit or delete tag keys or values with this prefix. Tags with this prefix do not count against your tags per resource limit. public let tags: [Tag]? /// The resources available on the instance. public let totalResources: [Resource]? - /// The version information for the Amazon ECS container agent and Docker daemon that runs on the container - /// instance. + /// The version information for the Amazon ECS container agent and Docker daemon that runs on + /// the container instance. public let versionInfo: VersionInfo? @inlinable @@ -5605,81 +5905,91 @@ extension ECS { } public struct RegisterTaskDefinitionRequest: AWSEncodableShape { - /// A list of container definitions in JSON format that describe the different containers that make up - /// your task. + /// A list of container definitions in JSON format that describe the different containers + /// that make up your task. 
public let containerDefinitions: [ContainerDefinition] - /// The number of CPU units used by the task. It can be expressed as an integer using CPU units (for - /// example, 1024) or as a string using vCPUs (for example, 1 vCPU or 1 - /// vcpu) in a task definition. String values are converted to an integer indicating the CPU - /// units when the task definition is registered. Task-level CPU and memory parameters are ignored for Windows containers. We recommend specifying - /// container-level resources for Windows containers. If you're using the EC2 launch type, this field is optional. Supported values are - /// between 128 CPU units (0.125 vCPUs) and 10240 CPU units - /// (10 vCPUs). If you do not specify a value, the parameter is ignored. If you're using the Fargate launch type, this field is required and you must use one of - /// the following values, which determines your range of supported values for the memory - /// parameter: The CPU units cannot be less than 1 vCPU when you use Windows containers on + /// The number of CPU units used by the task. It can be expressed as an integer using CPU + /// units (for example, 1024) or as a string using vCPUs (for example, 1 + /// vCPU or 1 vcpu) in a task definition. String values are + /// converted to an integer indicating the CPU units when the task definition is + /// registered. Task-level CPU and memory parameters are ignored for Windows containers. We + /// recommend specifying container-level resources for Windows containers. If you're using the EC2 launch type, this field is optional. Supported + /// values are between 128 CPU units (0.125 vCPUs) and + /// 10240 CPU units (10 vCPUs). If you do not specify a value, + /// the parameter is ignored. 
If you're using the Fargate launch type, this field is required and you + /// must use one of the following values, which determines your range of supported values + /// for the memory parameter: The CPU units cannot be less than 1 vCPU when you use Windows containers on /// Fargate. 256 (.25 vCPU) - Available memory values: 512 (0.5 GB), 1024 (1 GB), 2048 (2 GB) 512 (.5 vCPU) - Available memory values: 1024 (1 GB), 2048 (2 GB), 3072 (3 GB), 4096 (4 GB) 1024 (1 vCPU) - Available memory values: 2048 (2 GB), 3072 (3 GB), 4096 (4 GB), 5120 (5 GB), 6144 (6 GB), 7168 (7 GB), 8192 (8 GB) 2048 (2 vCPU) - Available memory values: 4096 (4 GB) and 16384 (16 GB) in increments of 1024 (1 GB) 4096 (4 vCPU) - Available memory values: 8192 (8 GB) and 30720 (30 GB) in increments of 1024 (1 GB) 8192 (8 vCPU) - Available memory values: 16 GB and 60 GB in 4 GB increments This option requires Linux platform 1.4.0 or later. 16384 (16vCPU) - Available memory values: 32GB and 120 GB in 8 GB increments This option requires Linux platform 1.4.0 or later. public let cpu: String? - /// The amount of ephemeral storage to allocate for the task. This parameter is used to expand the total - /// amount of ephemeral storage available, beyond the default amount, for tasks hosted on Fargate. For - /// more information, see Using data volumes in tasks - /// in the Amazon ECS Developer Guide. For tasks using the Fargate launch type, the task requires the following - /// platforms: Linux platform version 1.4.0 or later. Windows platform version 1.0.0 or later. + /// Enables fault injection when you register your task definition and allows for fault injection requests + /// to be accepted from the task's containers. The default value is false. + public let enableFaultInjection: Bool? + /// The amount of ephemeral storage to allocate for the task. This parameter is used to + /// expand the total amount of ephemeral storage available, beyond the default amount, for + /// tasks hosted on Fargate. 
For more information, see Using data volumes in + /// tasks in the Amazon ECS Developer Guide. For tasks using the Fargate launch type, the task requires the + /// following platforms: Linux platform version 1.4.0 or later. Windows platform version 1.0.0 or later. public let ephemeralStorage: EphemeralStorage? /// The Amazon Resource Name (ARN) of the task execution role that grants the Amazon ECS container agent permission to make Amazon Web Services API calls on your behalf. For informationabout the required IAM roles for Amazon ECS, see IAM roles for Amazon ECS in the Amazon Elastic Container Service Developer Guide. public let executionRoleArn: String? - /// You must specify a family for a task definition. You can use it track multiple versions - /// of the same task definition. The family is used as a name for your task definition. - /// Up to 255 letters (uppercase and lowercase), numbers, underscores, and hyphens are allowed. + /// You must specify a family for a task definition. You can use it to track + /// multiple versions of the same task definition. The family is used as a name + /// for your task definition. Up to 255 letters (uppercase and lowercase), numbers, underscores, and hyphens are allowed. public let family: String /// The Elastic Inference accelerators to use for the containers in the task. public let inferenceAccelerators: [InferenceAccelerator]? /// The IPC resource namespace to use for the containers in the task. The valid values are host, task, or none. If host is specified, then all containers within the tasks that specified the host IPC mode on the same container instance share the same IPC resources with the host Amazon EC2 instance. If task is specified, all containers within the specified task share the same IPC resources. If none is specified, then IPC resources within the containers of a task are private and not shared with other containers in a task or on the container instance.
If no value is specified, then the IPC resource namespace sharing depends on the Docker daemon setting on the container instance. If the host IPC mode is used, be aware that there is a heightened risk of undesired IPC namespace expose. If you are setting namespaced kernel parameters using systemControls for the containers in the task, the following will apply to your IPC resource namespace. For more information, see System Controls in the Amazon Elastic Container Service Developer Guide. For tasks that use the host IPC mode, IPC namespace related systemControls are not supported. For tasks that use the task IPC mode, IPC namespace related systemControls will apply to all containers within a task. This parameter is not supported for Windows containers or tasks run on Fargate. public let ipcMode: IpcMode? - /// The amount of memory (in MiB) used by the task. It can be expressed as an integer using MiB (for - /// example ,1024) or as a string using GB (for example, 1GB or 1 - /// GB) in a task definition. String values are converted to an integer indicating the MiB when the - /// task definition is registered. Task-level CPU and memory parameters are ignored for Windows containers. We recommend specifying - /// container-level resources for Windows containers. If using the EC2 launch type, this field is optional. If using the Fargate launch type, this field is required and you must use one of the - /// following values. This determines your range of supported values for the cpu - /// parameter. The CPU units cannot be less than 1 vCPU when you use Windows containers on + /// The amount of memory (in MiB) used by the task. It can be expressed as an integer + /// using MiB (for example ,1024) or as a string using GB (for example, + /// 1GB or 1 GB) in a task definition. String values are + /// converted to an integer indicating the MiB when the task definition is + /// registered. Task-level CPU and memory parameters are ignored for Windows containers. 
We + /// recommend specifying container-level resources for Windows containers. If using the EC2 launch type, this field is optional. If using the Fargate launch type, this field is required and you must + /// use one of the following values. This determines your range of supported values for the + /// cpu parameter. The CPU units cannot be less than 1 vCPU when you use Windows containers on /// Fargate. 512 (0.5 GB), 1024 (1 GB), 2048 (2 GB) - Available cpu values: 256 (.25 vCPU) 1024 (1 GB), 2048 (2 GB), 3072 (3 GB), 4096 (4 GB) - Available cpu values: 512 (.5 vCPU) 2048 (2 GB), 3072 (3 GB), 4096 (4 GB), 5120 (5 GB), 6144 (6 GB), 7168 (7 GB), 8192 (8 GB) - Available cpu values: 1024 (1 vCPU) Between 4096 (4 GB) and 16384 (16 GB) in increments of 1024 (1 GB) - Available cpu values: 2048 (2 vCPU) Between 8192 (8 GB) and 30720 (30 GB) in increments of 1024 (1 GB) - Available cpu values: 4096 (4 vCPU) Between 16 GB and 60 GB in 4 GB increments - Available cpu values: 8192 (8 vCPU) This option requires Linux platform 1.4.0 or later. Between 32GB and 120 GB in 8 GB increments - Available cpu values: 16384 (16 vCPU) This option requires Linux platform 1.4.0 or later. public let memory: String? /// The Docker networking mode to use for the containers in the task. The valid values are none, bridge, awsvpc, and host. If no network mode is specified, the default is bridge. For Amazon ECS tasks on Fargate, the awsvpc network mode is required. For Amazon ECS tasks on Amazon EC2 Linux instances, any network mode can be used. For Amazon ECS tasks on Amazon EC2 Windows instances, or awsvpc can be used. If the network mode is set to none, you cannot specify port mappings in your container definitions, and the tasks containers do not have external connectivity. The host and awsvpc network modes offer the highest networking performance for containers because they use the EC2 network stack instead of the virtualized network stack provided by the bridge mode. 
With the host and awsvpc network modes, exposed container ports are mapped directly to the corresponding host port (for the host network mode) or the attached elastic network interface port (for the awsvpc network mode), so you cannot take advantage of dynamic host port mappings. When using the host network mode, you should not run containers using the root user (UID 0). It is considered best practice to use a non-root user. If the network mode is awsvpc, the task is allocated an elastic network interface, and you must specify a NetworkConfiguration value when you create a service or run a task with the task definition. For more information, see Task Networking in the Amazon Elastic Container Service Developer Guide. If the network mode is host, you cannot run multiple instantiations of the same task on a single container instance when port mappings are used. public let networkMode: NetworkMode? /// The process namespace to use for the containers in the task. The valid values are host or task. On Fargate for Linux containers, the only valid value is task. For example, monitoring sidecars might need pidMode to access information about other containers running in the same task. If host is specified, all containers within the tasks that specified the host PID mode on the same container instance share the same process namespace with the host Amazon EC2 instance. If task is specified, all containers within the specified task share the same process namespace. If no value is specified, the default is a private namespace for each container. If the host PID mode is used, there's a heightened risk of undesired process namespace exposure. This parameter is not supported for Windows containers. This parameter is only supported for tasks that are hosted on Fargate if the tasks are using platform version 1.4.0 or later (Linux). This isn't supported for Windows containers on Fargate. public let pidMode: PidMode? - /// An array of placement constraint objects to use for the task. 
You can specify a maximum of 10 - /// constraints for each task. This limit includes constraints in the task definition and those specified - /// at runtime. + /// An array of placement constraint objects to use for the task. You can specify a + /// maximum of 10 constraints for each task. This limit includes constraints in the task + /// definition and those specified at runtime. public let placementConstraints: [TaskDefinitionPlacementConstraint]? /// The configuration details for the App Mesh proxy. For tasks hosted on Amazon EC2 instances, the container instances require at least version - /// 1.26.0 of the container agent and at least version 1.26.0-1 of the - /// ecs-init package to use a proxy configuration. If your container instances are - /// launched from the Amazon ECS-optimized AMI version 20190301 or later, then they contain - /// the required versions of the container agent and ecs-init. For more information, see - /// Amazon ECS-optimized AMI versions in the Amazon Elastic Container Service Developer Guide. + /// 1.26.0 of the container agent and at least version + /// 1.26.0-1 of the ecs-init package to use a proxy + /// configuration. If your container instances are launched from the Amazon ECS-optimized + /// AMI version 20190301 or later, then they contain the required versions of + /// the container agent and ecs-init. For more information, see Amazon ECS-optimized AMI versions in the Amazon Elastic Container Service Developer Guide. public let proxyConfiguration: ProxyConfiguration? - /// The task launch type that Amazon ECS validates the task definition against. A client exception is returned - /// if the task definition doesn't validate against the compatibilities specified. If no value is - /// specified, the parameter is omitted from the response. + /// The task launch type that Amazon ECS validates the task definition against. 
A client + /// exception is returned if the task definition doesn't validate against the + /// compatibilities specified. If no value is specified, the parameter is omitted from the + /// response. public let requiresCompatibilities: [Compatibility]? - /// The operating system that your tasks definitions run on. A platform family is specified only for - /// tasks using the Fargate launch type. + /// The operating system that your tasks definitions run on. A platform family is + /// specified only for tasks using the Fargate launch type. public let runtimePlatform: RuntimePlatform? - /// The metadata that you apply to the task definition to help you categorize and organize them. Each tag - /// consists of a key and an optional value. You define both of them. The following basic restrictions apply to tags: Maximum number of tags per resource - 50 For each resource, each tag key must be unique, and each tag key can have only one value. Maximum key length - 128 Unicode characters in UTF-8 Maximum value length - 256 Unicode characters in UTF-8 If your tagging schema is used across multiple services and resources, remember that other services may have restrictions on allowed characters. Generally allowed characters are: letters, numbers, and spaces representable in UTF-8, and the following characters: + - = . _ : / @. Tag keys and values are case-sensitive. Do not use aws:, AWS:, or any upper or lowercase combination of such as a prefix for either keys or values as it is reserved for Amazon Web Services use. You cannot edit or delete tag keys or values with this prefix. Tags with this prefix do not count against your tags per resource limit. + /// The metadata that you apply to the task definition to help you categorize and organize + /// them. Each tag consists of a key and an optional value. You define both of them. 
The following basic restrictions apply to tags: Maximum number of tags per resource - 50 For each resource, each tag key must be unique, and each tag key can have only one value. Maximum key length - 128 Unicode characters in UTF-8 Maximum value length - 256 Unicode characters in UTF-8 If your tagging schema is used across multiple services and resources, remember that other services may have restrictions on allowed characters. Generally allowed characters are: letters, numbers, and spaces representable in UTF-8, and the following characters: + - = . _ : / @. Tag keys and values are case-sensitive. Do not use aws:, AWS:, or any upper or lowercase combination of such as a prefix for either keys or values as it is reserved for Amazon Web Services use. You cannot edit or delete tag keys or values with this prefix. Tags with this prefix do not count against your tags per resource limit. public let tags: [Tag]? - /// The short name or full Amazon Resource Name (ARN) of the IAM role that containers in this task can assume. All - /// containers in this task are granted the permissions that are specified in this role. For more - /// information, see IAM Roles for Tasks in the Amazon Elastic Container Service Developer Guide. + /// The short name or full Amazon Resource Name (ARN) of the IAM role that containers in this task can + /// assume. All containers in this task are granted the permissions that are specified in + /// this role. For more information, see IAM Roles for + /// Tasks in the Amazon Elastic Container Service Developer Guide. public let taskRoleArn: String? - /// A list of volume definitions in JSON format that containers in your task might use. + /// A list of volume definitions in JSON format that containers in your task might + /// use. public let volumes: [Volume]? @inlinable - public init(containerDefinitions: [ContainerDefinition], cpu: String? = nil, ephemeralStorage: EphemeralStorage? = nil, executionRoleArn: String? 
= nil, family: String, inferenceAccelerators: [InferenceAccelerator]? = nil, ipcMode: IpcMode? = nil, memory: String? = nil, networkMode: NetworkMode? = nil, pidMode: PidMode? = nil, placementConstraints: [TaskDefinitionPlacementConstraint]? = nil, proxyConfiguration: ProxyConfiguration? = nil, requiresCompatibilities: [Compatibility]? = nil, runtimePlatform: RuntimePlatform? = nil, tags: [Tag]? = nil, taskRoleArn: String? = nil, volumes: [Volume]? = nil) { + public init(containerDefinitions: [ContainerDefinition], cpu: String? = nil, enableFaultInjection: Bool? = nil, ephemeralStorage: EphemeralStorage? = nil, executionRoleArn: String? = nil, family: String, inferenceAccelerators: [InferenceAccelerator]? = nil, ipcMode: IpcMode? = nil, memory: String? = nil, networkMode: NetworkMode? = nil, pidMode: PidMode? = nil, placementConstraints: [TaskDefinitionPlacementConstraint]? = nil, proxyConfiguration: ProxyConfiguration? = nil, requiresCompatibilities: [Compatibility]? = nil, runtimePlatform: RuntimePlatform? = nil, tags: [Tag]? = nil, taskRoleArn: String? = nil, volumes: [Volume]? = nil) { self.containerDefinitions = containerDefinitions self.cpu = cpu + self.enableFaultInjection = enableFaultInjection self.ephemeralStorage = ephemeralStorage self.executionRoleArn = executionRoleArn self.family = family @@ -5707,6 +6017,7 @@ extension ECS { private enum CodingKeys: String, CodingKey { case containerDefinitions = "containerDefinitions" case cpu = "cpu" + case enableFaultInjection = "enableFaultInjection" case ephemeralStorage = "ephemeralStorage" case executionRoleArn = "executionRoleArn" case family = "family" @@ -5744,9 +6055,11 @@ extension ECS { } public struct RepositoryCredentials: AWSEncodableShape & AWSDecodableShape { - /// The Amazon Resource Name (ARN) of the secret containing the private repository credentials. 
When you use the Amazon ECS API, CLI, or Amazon Web Services SDK, if the secret exists in the same Region as the - /// task that you're launching then you can use either the full ARN or the name of the secret. When - /// you use the Amazon Web Services Management Console, you must specify the full ARN of the secret. + /// The Amazon Resource Name (ARN) of the secret containing the private repository + /// credentials. When you use the Amazon ECS API, CLI, or Amazon Web Services SDK, if the secret exists in the same + /// Region as the task that you're launching then you can use either the full ARN or + /// the name of the secret. When you use the Amazon Web Services Management Console, you must specify the full ARN + /// of the secret. public let credentialsParameter: String @inlinable @@ -5760,22 +6073,23 @@ extension ECS { } public struct Resource: AWSEncodableShape & AWSDecodableShape { - /// When the doubleValue type is set, the value of the resource must be a double precision - /// floating-point type. + /// When the doubleValue type is set, the value of the resource must be a + /// double precision floating-point type. public let doubleValue: Double? - /// When the integerValue type is set, the value of the resource must be an integer. + /// When the integerValue type is set, the value of the resource must be an + /// integer. public let integerValue: Int? - /// When the longValue type is set, the value of the resource must be an extended precision - /// floating-point type. + /// When the longValue type is set, the value of the resource must be an + /// extended precision floating-point type. public let longValue: Int64? - /// The name of the resource, such as CPU, MEMORY, PORTS, - /// PORTS_UDP, or a user-defined resource. + /// The name of the resource, such as CPU, MEMORY, + /// PORTS, PORTS_UDP, or a user-defined resource. public let name: String? - /// When the stringSetValue type is set, the value of the resource must be a string - /// type. 
+ /// When the stringSetValue type is set, the value of the resource must be a + /// string type. public let stringSetValue: [String]? - /// The type of the resource. Valid values: INTEGER, DOUBLE, LONG, - /// or STRINGSET. + /// The type of the resource. Valid values: INTEGER, DOUBLE, + /// LONG, or STRINGSET. public let type: String? @inlinable @@ -5801,12 +6115,11 @@ extension ECS { public struct ResourceRequirement: AWSEncodableShape & AWSDecodableShape { /// The type of resource to assign to a container. public let type: ResourceType - /// The value for the specified resource type. When the type is GPU, the value is the number of physical GPUs the Amazon ECS - /// container agent reserves for the container. The number of GPUs that's reserved for all containers in a - /// task can't exceed the number of available GPUs on the container instance that the task is launched - /// on. When the type is InferenceAccelerator, the value matches the - /// deviceName for an InferenceAccelerator - /// specified in a task definition. + /// The value for the specified resource type. When the type is GPU, the value is the number of physical + /// GPUs the Amazon ECS container agent reserves for the container. The number + /// of GPUs that's reserved for all containers in a task can't exceed the number of + /// available GPUs on the container instance that the task is launched on. When the type is InferenceAccelerator, the value matches the + /// deviceName for an InferenceAccelerator specified in a task definition. public let value: String @inlinable @@ -5822,15 +6135,16 @@ extension ECS { } public struct Rollback: AWSDecodableShape { - /// The reason the rollback happened. For example, the circuit breaker initiated the rollback operation. + /// The reason the rollback happened. For example, the circuit breaker initiated the + /// rollback operation. public let reason: String? /// The ARN of the service revision deployed as part of the rollback. 
When the type is GPU, the value is the number of physical - /// GPUs the Amazon ECS container agent reserves for the container. The number + /// GPUs the Amazon ECS container agent reserves for the container. The number /// of GPUs that's reserved for all containers in a task can't exceed the number of /// available GPUs on the container instance that the task is launched on. When the type is InferenceAccelerator, the value matches the - /// deviceName for an InferenceAccelerator specified in a task definition. + /// deviceName for an InferenceAccelerator specified in a task definition. public let serviceRevisionArn: String? - /// Time time that the rollback started. The format is yyyy-MM-dd HH:mm:ss.SSSSSS. + /// The time that the rollback started. The format is yyyy-MM-dd HH:mm:ss.SSSSSS. public let startedAt: Date? @inlinable @@ -5848,93 +6162,102 @@ extension ECS { } public struct RunTaskRequest: AWSEncodableShape { - /// The capacity provider strategy to use for the task. If a capacityProviderStrategy is specified, the launchType parameter must - /// be omitted. If no capacityProviderStrategy or launchType is specified, the - /// defaultCapacityProviderStrategy for the cluster is used. When you use cluster auto scaling, you must specify capacityProviderStrategy and not - /// launchType. A capacity provider strategy may contain a maximum of 6 capacity providers. + /// The capacity provider strategy to use for the task. If a capacityProviderStrategy is specified, the launchType + /// parameter must be omitted. If no capacityProviderStrategy or + /// launchType is specified, the + /// defaultCapacityProviderStrategy for the cluster is used. When you use cluster auto scaling, you must specify + /// capacityProviderStrategy and not launchType. A capacity provider strategy can contain a maximum of 20 capacity providers. public let capacityProviderStrategy: [CapacityProviderStrategyItem]? - /// An identifier that you provide to ensure the idempotency of the request.
It must be unique and is - /// case sensitive. Up to 64 characters are allowed. The valid characters are characters in the range of - /// 33-126, inclusive. For more information, see Ensuring - /// idempotency. + /// An identifier that you provide to ensure the idempotency of the request. It must be + /// unique and is case sensitive. Up to 64 characters are allowed. The valid characters are + /// characters in the range of 33-126, inclusive. For more information, see Ensuring idempotency. public let clientToken: String? - /// The short name or full Amazon Resource Name (ARN) of the cluster to run your task on. If you do not specify a cluster, the default cluster is assumed. + /// The short name or full Amazon Resource Name (ARN) of the cluster to run your task on. + /// If you do not specify a cluster, the default cluster is assumed. public let cluster: String? - /// The number of instantiations of the specified task to place on your cluster. You can specify up to 10 - /// tasks for each call. + /// The number of instantiations of the specified task to place on your cluster. You can + /// specify up to 10 tasks for each call. public let count: Int? - /// Specifies whether to use Amazon ECS managed tags for the task. For more information, see Tagging Your Amazon ECS + /// Specifies whether to use Amazon ECS managed tags for the task. For more information, see + /// Tagging Your Amazon ECS /// Resources in the Amazon Elastic Container Service Developer Guide. public let enableECSManagedTags: Bool? - /// Determines whether to use the execute command functionality for the containers in this task. If - /// true, this enables execute command functionality on all containers in the task. If true, then the task definition must have a task role, or you must provide one as an - /// override. + /// Determines whether to use the execute command functionality for the containers in this + /// task. 
If true, this enables execute command functionality on all containers + /// in the task. If true, then the task definition must have a task role, or you must + /// provide one as an override. public let enableExecuteCommand: Bool? - /// The name of the task group to associate with the task. The default value is the family name of the - /// task definition (for example, family:my-family-name). + /// The name of the task group to associate with the task. The default value is the family + /// name of the task definition (for example, family:my-family-name). public let group: String? - /// The infrastructure to run your standalone task on. For more information, see Amazon ECS launch - /// types in the Amazon Elastic Container Service Developer Guide. The FARGATE launch type runs your tasks on Fargate On-Demand infrastructure. Fargate Spot infrastructure is available for use but a capacity provider strategy must be used. - /// For more information, see Fargate capacity providers in the Amazon ECS Developer - /// Guide. The EC2 launch type runs your tasks on Amazon EC2 instances registered to your - /// cluster. The EXTERNAL launch type runs your tasks on your on-premises server or virtual machine - /// (VM) capacity registered to your cluster. A task can use either a launch type or a capacity provider strategy. If a launchType is - /// specified, the capacityProviderStrategy parameter must be omitted. When you use cluster auto scaling, you must specify capacityProviderStrategy and not - /// launchType. + /// The infrastructure to run your standalone task on. For more information, see Amazon ECS + /// launch types in the Amazon Elastic Container Service Developer Guide. The FARGATE launch type runs your tasks on Fargate On-Demand + /// infrastructure. Fargate Spot infrastructure is available for use but a capacity provider + /// strategy must be used. For more information, see Fargate capacity providers in the + /// Amazon ECS Developer Guide. 
The EC2 launch type runs your tasks on Amazon EC2 instances registered to your + /// cluster. The EXTERNAL launch type runs your tasks on your on-premises server or + /// virtual machine (VM) capacity registered to your cluster. A task can use either a launch type or a capacity provider strategy. If a + /// launchType is specified, the capacityProviderStrategy + /// parameter must be omitted. When you use cluster auto scaling, you must specify + /// capacityProviderStrategy and not launchType. public let launchType: LaunchType? - /// The network configuration for the task. This parameter is required for task definitions that use the - /// awsvpc network mode to receive their own elastic network interface, and it isn't - /// supported for other network modes. For more information, see Task networking in the - /// Amazon Elastic Container Service Developer Guide. + /// The network configuration for the task. This parameter is required for task + /// definitions that use the awsvpc network mode to receive their own elastic + /// network interface, and it isn't supported for other network modes. For more information, + /// see Task networking + /// in the Amazon Elastic Container Service Developer Guide. public let networkConfiguration: NetworkConfiguration? - /// A list of container overrides in JSON format that specify the name of a container in the specified - /// task definition and the overrides it should receive. You can override the default command for a - /// container (that's specified in the task definition or Docker image) with a command - /// override. You can also override existing environment variables (that are specified in the task - /// definition or Docker image) on a container or add new environment variables to it with an - /// environment override. A total of 8192 characters are allowed for overrides. This limit includes the JSON formatting - /// characters of the override structure. 
+ /// A list of container overrides in JSON format that specify the name of a container in + /// the specified task definition and the overrides it should receive. You can override the + /// default command for a container (that's specified in the task definition or Docker + /// image) with a command override. You can also override existing environment + /// variables (that are specified in the task definition or Docker image) on a container or + /// add new environment variables to it with an environment override. A total of 8192 characters are allowed for overrides. This limit includes the JSON + /// formatting characters of the override structure. public let overrides: TaskOverride? - /// An array of placement constraint objects to use for the task. You can specify up to 10 constraints - /// for each task (including constraints in the task definition and those specified at runtime). + /// An array of placement constraint objects to use for the task. You can specify up to 10 + /// constraints for each task (including constraints in the task definition and those + /// specified at runtime). public let placementConstraints: [PlacementConstraint]? - /// The placement strategy objects to use for the task. You can specify a maximum of 5 strategy rules for - /// each task. + /// The placement strategy objects to use for the task. You can specify a maximum of 5 + /// strategy rules for each task. public let placementStrategy: [PlacementStrategy]? - /// The platform version the task uses. A platform version is only specified for tasks hosted on - /// Fargate. If one isn't specified, the LATEST platform version is used. For - /// more information, see Fargate platform versions in - /// the Amazon Elastic Container Service Developer Guide. + /// The platform version the task uses. A platform version is only specified for tasks + /// hosted on Fargate. If one isn't specified, the LATEST + /// platform version is used. 
For more information, see Fargate platform + /// versions in the Amazon Elastic Container Service Developer Guide. public let platformVersion: String? - /// Specifies whether to propagate the tags from the task definition to the task. If no value is - /// specified, the tags aren't propagated. Tags can only be propagated to the task during task creation. To - /// add tags to a task after task creation, use theTagResource API action. An error will be received if you specify the SERVICE option when running a - /// task. + /// Specifies whether to propagate the tags from the task definition to the task. If no + /// value is specified, the tags aren't propagated. Tags can only be propagated to the task + /// during task creation. To add tags to a task after task creation, use the TagResource API action. An error will be received if you specify the SERVICE option when + /// running a task. public let propagateTags: PropagateTags? /// This parameter is only used by Amazon ECS. It is not intended for use by customers. public let referenceId: String? - /// An optional tag specified when a task is started. For example, if you automatically trigger a task to - /// run a batch process job, you could apply a unique identifier for that job to your task with the - /// startedBy parameter. You can then identify which tasks belong to that job by filtering - /// the results of a ListTasks call with the startedBy value. Up to 128 letters (uppercase and - /// lowercase), numbers, hyphens (-), forward slash (/), and underscores (_) are allowed. If a task is started by an Amazon ECS service, then the startedBy parameter contains the - /// deployment ID of the service that starts it. + /// An optional tag specified when a task is started. For example, if you automatically + /// trigger a task to run a batch process job, you could apply a unique identifier for that + /// job to your task with the startedBy parameter. 
You can then identify which + /// tasks belong to that job by filtering the results of a ListTasks call with + /// the startedBy value. Up to 128 letters (uppercase and lowercase), numbers, + /// hyphens (-), forward slash (/), and underscores (_) are allowed. If a task is started by an Amazon ECS service, then the startedBy parameter + /// contains the deployment ID of the service that starts it. public let startedBy: String? - /// The metadata that you apply to the task to help you categorize and organize them. Each tag consists - /// of a key and an optional value, both of which you define. The following basic restrictions apply to tags: Maximum number of tags per resource - 50 For each resource, each tag key must be unique, and each tag key can have only one value. Maximum key length - 128 Unicode characters in UTF-8 Maximum value length - 256 Unicode characters in UTF-8 If your tagging schema is used across multiple services and resources, remember that other services may have restrictions on allowed characters. Generally allowed characters are: letters, numbers, and spaces representable in UTF-8, and the following characters: + - = . _ : / @. Tag keys and values are case-sensitive. Do not use aws:, AWS:, or any upper or lowercase combination of such as a prefix for either keys or values as it is reserved for Amazon Web Services use. You cannot edit or delete tag keys or values with this prefix. Tags with this prefix do not count against your tags per resource limit. + /// The metadata that you apply to the task to help you categorize and organize them. Each + /// tag consists of a key and an optional value, both of which you define. The following basic restrictions apply to tags: Maximum number of tags per resource - 50 For each resource, each tag key must be unique, and each tag key can have only one value. 
Maximum key length - 128 Unicode characters in UTF-8 Maximum value length - 256 Unicode characters in UTF-8 If your tagging schema is used across multiple services and resources, remember that other services may have restrictions on allowed characters. Generally allowed characters are: letters, numbers, and spaces representable in UTF-8, and the following characters: + - = . _ : / @. Tag keys and values are case-sensitive. Do not use aws:, AWS:, or any upper or lowercase combination of such as a prefix for either keys or values as it is reserved for Amazon Web Services use. You cannot edit or delete tag keys or values with this prefix. Tags with this prefix do not count against your tags per resource limit. public let tags: [Tag]? - /// The family and revision (family:revision) or full ARN of the - /// task definition to run. If a revision isn't specified, the latest ACTIVE - /// revision is used. The full ARN value must match the value that you specified as the Resource of the - /// principal's permissions policy. When you specify a task definition, you must either specify a specific revision, or all revisions in - /// the ARN. To specify a specific revision, include the revision number in the ARN. For example, to specify - /// revision 2, use - /// arn:aws:ecs:us-east-1:111122223333:task-definition/TaskFamilyName:2. To specify all revisions, use the wildcard (*) in the ARN. For example, to specify all revisions, - /// use arn:aws:ecs:us-east-1:111122223333:task-definition/TaskFamilyName:*. For more information, see Policy Resources for Amazon ECS in the Amazon Elastic Container Service Developer Guide. + /// The family and revision (family:revision) or + /// full ARN of the task definition to run. If a revision isn't specified, + /// the latest ACTIVE revision is used. The full ARN value must match the value that you specified as the + /// Resource of the principal's permissions policy. 
When you specify a task definition, you must either specify a specific revision, or + /// all revisions in the ARN. To specify a specific revision, include the revision number in the ARN. For example, + /// to specify revision 2, use + /// arn:aws:ecs:us-east-1:111122223333:task-definition/TaskFamilyName:2. To specify all revisions, use the wildcard (*) in the ARN. For example, to specify + /// all revisions, use + /// arn:aws:ecs:us-east-1:111122223333:task-definition/TaskFamilyName:*. For more information, see Policy Resources for Amazon ECS in the Amazon Elastic Container Service Developer Guide. public let taskDefinition: String - /// The details of the volume that was configuredAtLaunch. You can configure the size, - /// volumeType, IOPS, throughput, snapshot and encryption in in TaskManagedEBSVolumeConfiguration. The name of the volume must match the - /// name from the task definition. + /// The details of the volume that was configuredAtLaunch. You can configure + /// the size, volumeType, IOPS, throughput, snapshot and encryption in TaskManagedEBSVolumeConfiguration. The name of the volume must + /// match the name from the task definition. public let volumeConfigurations: [TaskVolumeConfiguration]? @inlinable @@ -5997,11 +6320,11 @@ extension ECS { } public struct RunTaskResponse: AWSDecodableShape { - /// Any failures associated with the call. For information about how to address failures, see Service event messages and API failure reasons in the - /// Amazon Elastic Container Service Developer Guide. + /// Any failures associated with the call. For information about how to address failures, see Service event messages and API failure + /// reasons in the Amazon Elastic Container Service Developer Guide. public let failures: [Failure]? - /// A full description of the tasks that were run. The tasks that were successfully placed on your - /// cluster are described here. 
The tasks that were successfully placed + /// on your cluster are described here. public let tasks: [Task]? @inlinable @@ -6017,9 +6340,9 @@ extension ECS { } public struct RuntimePlatform: AWSEncodableShape & AWSDecodableShape { - /// The CPU architecture. You can run your Linux tasks on an ARM-based platform by setting the value to ARM64. - /// This option is available for tasks that run on Linux Amazon EC2 instance or Linux containers on - /// Fargate. + /// The CPU architecture. You can run your Linux tasks on an ARM-based platform by setting the value to + /// ARM64. This option is available for tasks that run on Linux Amazon EC2 + /// instance or Linux containers on Fargate. public let cpuArchitecture: CPUArchitecture? /// The operating system. public let operatingSystemFamily: OSFamily? @@ -6039,8 +6362,8 @@ extension ECS { public struct Scale: AWSEncodableShape & AWSDecodableShape { /// The unit of measure for the scale value. public let unit: ScaleUnit? - /// The value, specified as a percent total of a service's desiredCount, to scale the task - /// set. Accepted values are numbers between 0 and 100. + /// The value, specified as a percent total of a service's desiredCount, to + /// scale the task set. Accepted values are numbers between 0 and 100. public let value: Double? @inlinable @@ -6058,12 +6381,14 @@ extension ECS { public struct Secret: AWSEncodableShape & AWSDecodableShape { /// The name of the secret. public let name: String - /// The secret to expose to the container. The supported values are either the full ARN of the Secrets Manager secret or the full ARN of the parameter in the SSM Parameter Store. For information about the require Identity and Access Management permissions, see Required - /// IAM permissions for Amazon ECS secrets (for Secrets Manager) or Required IAM - /// permissions for Amazon ECS secrets (for Systems Manager Parameter store) in the - /// Amazon Elastic Container Service Developer Guide. 
If the SSM Parameter Store parameter exists in the same Region as the task you're launching, - /// then you can use either the full ARN or name of the parameter. If the parameter exists in a - /// different Region, then the full ARN must be specified. + /// The secret to expose to the container. The supported values are either the full ARN + /// of the Secrets Manager secret or the full ARN of the parameter in the SSM + /// Parameter Store. For information about the required Identity and Access Management permissions, see Required IAM permissions for Amazon ECS secrets (for Secrets Manager) or + /// Required IAM permissions for Amazon ECS secrets (for Systems Manager Parameter + /// store) in the Amazon Elastic Container Service Developer Guide. If the SSM Parameter Store parameter exists in the same Region as the task + /// you're launching, then you can use either the full ARN or name of the parameter. + /// If the parameter exists in a different Region, then the full ARN must be + /// specified. public let valueFrom: String @inlinable @@ -6082,8 +6407,8 @@ extension ECS { /// Indicates whether to use Availability Zone rebalancing for the service. For more information, see Balancing an Amazon ECS service across Availability Zones in /// the Amazon Elastic Container Service Developer Guide. public let availabilityZoneRebalancing: AvailabilityZoneRebalancing? - /// The capacity provider strategy the service uses. When using the DescribeServices API, this field is - /// omitted if the service was created using a launch type. + /// The capacity provider strategy the service uses. When using the DescribeServices API, + /// this field is omitted if the service was created using a launch type. public let capacityProviderStrategy: [CapacityProviderStrategyItem]? /// The Amazon Resource Name (ARN) of the cluster that hosts the service. public let clusterArn: String? @@ -6091,36 +6416,36 @@ extension ECS { public let createdAt: Date? 
/// The principal that created the service. public let createdBy: String? - /// Optional deployment parameters that control how many tasks run during the deployment and the ordering - /// of stopping and starting tasks. + /// Optional deployment parameters that control how many tasks run during the deployment + /// and the ordering of stopping and starting tasks. public let deploymentConfiguration: DeploymentConfiguration? /// The deployment controller type the service is using. public let deploymentController: DeploymentController? /// The current state of deployments for the service. public let deployments: [Deployment]? - /// The desired number of instantiations of the task definition to keep running on the service. This - /// value is specified when the service is created with CreateService , and it can be - /// modified with UpdateService. + /// The desired number of instantiations of the task definition to keep running on the + /// service. This value is specified when the service is created with CreateService , and it can be modified with UpdateService. public let desiredCount: Int? - /// Determines whether to use Amazon ECS managed tags for the tasks in the service. For more information, see - /// Tagging Your - /// Amazon ECS Resources in the Amazon Elastic Container Service Developer Guide. + /// Determines whether to use Amazon ECS managed tags for the tasks in the service. For more + /// information, see Tagging Your Amazon ECS + /// Resources in the Amazon Elastic Container Service Developer Guide. public let enableECSManagedTags: Bool? /// Determines whether the execute command functionality is turned on for the service. If - /// true, the execute command functionality is turned on for all containers in tasks as - /// part of the service. + /// true, the execute command functionality is turned on for all containers + /// in tasks as part of the service. public let enableExecuteCommand: Bool? - /// The event stream for your service. 
A maximum of 100 of the latest events are displayed. + /// The event stream for your service. A maximum of 100 of the latest events are + /// displayed. public let events: [ServiceEvent]? - /// The period of time, in seconds, that the Amazon ECS service scheduler ignores unhealthy Elastic Load Balancing target - /// health checks after a task has first started. + /// The period of time, in seconds, that the Amazon ECS service scheduler ignores unhealthy + /// Elastic Load Balancing target health checks after a task has first started. public let healthCheckGracePeriodSeconds: Int? - /// The launch type the service is using. When using the DescribeServices API, this field is omitted if - /// the service was created using a capacity provider strategy. + /// The launch type the service is using. When using the DescribeServices API, this field + /// is omitted if the service was created using a capacity provider strategy. public let launchType: LaunchType? - /// A list of Elastic Load Balancing load balancer objects. It contains the load balancer name, the container name, and - /// the container port to access from the load balancer. The container name is as it appears in a container - /// definition. + /// A list of Elastic Load Balancing load balancer objects. It contains the load balancer name, the + /// container name, and the container port to access from the load balancer. The container + /// name is as it appears in a container definition. public let loadBalancers: [LoadBalancer]? /// The VPC subnet and security group configuration for tasks that receive their own elastic network interface by using the awsvpc networking mode. public let networkConfiguration: NetworkConfiguration? @@ -6130,55 +6455,60 @@ extension ECS { public let placementConstraints: [PlacementConstraint]? /// The placement strategy that determines how tasks for the service are placed. public let placementStrategy: [PlacementStrategy]? 
- /// The operating system that your tasks in the service run on. A platform family is specified only for - /// tasks using the Fargate launch type. All tasks that run as part of this service must use the same platformFamily value as - /// the service (for example, LINUX). + /// The operating system that your tasks in the service run on. A platform family is + /// specified only for tasks using the Fargate launch type. All tasks that run as part of this service must use the same + /// platformFamily value as the service (for example, + /// LINUX). public let platformFamily: String? - /// The platform version to run your service on. A platform version is only specified for tasks that are - /// hosted on Fargate. If one isn't specified, the LATEST platform version is used. For more - /// information, see Fargate Platform Versions in - /// the Amazon Elastic Container Service Developer Guide. + /// The platform version to run your service on. A platform version is only specified for + /// tasks that are hosted on Fargate. If one isn't specified, the LATEST + /// platform version is used. For more information, see Fargate Platform + /// Versions in the Amazon Elastic Container Service Developer Guide. public let platformVersion: String? - /// Determines whether to propagate the tags from the task definition or the service to the task. If no - /// value is specified, the tags aren't propagated. + /// Determines whether to propagate the tags from the task definition or the service to + /// the task. If no value is specified, the tags aren't propagated. public let propagateTags: PropagateTags? - /// The ARN of the IAM role that's associated with the service. It allows the Amazon ECS container agent - /// to register container instances with an Elastic Load Balancing load balancer. + /// The ARN of the IAM role that's associated with the service. It allows the Amazon ECS + /// container agent to register container instances with an Elastic Load Balancing load balancer. 
public let roleArn: String? /// The number of tasks in the cluster that are in the RUNNING state. public let runningCount: Int? - /// The scheduling strategy to use for the service. For more information, see Services. There are two service scheduler strategies available. REPLICA-The replica scheduling strategy places and maintains the desired - /// number of tasks across your cluster. By default, the service scheduler spreads tasks across - /// Availability Zones. You can use task placement strategies and constraints to customize task - /// placement decisions. DAEMON-The daemon scheduling strategy deploys exactly one task on each - /// active container instance. This task meets all of the task placement constraints that you - /// specify in your cluster. The service scheduler also evaluates the task placement constraints - /// for running tasks. It stop tasks that don't meet the placement constraints. Fargate tasks don't support the DAEMON scheduling - /// strategy. + /// The scheduling strategy to use for the service. For more information, see Services. There are two service scheduler strategies available. REPLICA-The replica scheduling strategy places and + /// maintains the desired number of tasks across your cluster. By default, the + /// service scheduler spreads tasks across Availability Zones. You can use task + /// placement strategies and constraints to customize task placement + /// decisions. DAEMON-The daemon scheduling strategy deploys exactly one + /// task on each active container instance. This task meets all of the task + /// placement constraints that you specify in your cluster. The service scheduler + /// also evaluates the task placement constraints for running tasks. It stops tasks + /// that don't meet the placement constraints. Fargate tasks don't support the DAEMON + /// scheduling strategy. public let schedulingStrategy: SchedulingStrategy? - /// The ARN that identifies the service. 
For more information about the ARN format, see Amazon Resource Name (ARN) - /// in the Amazon ECS Developer Guide. + /// The ARN that identifies the service. For more information about the ARN format, + /// see Amazon Resource Name (ARN) in the Amazon ECS Developer Guide. public let serviceArn: String? - /// The name of your service. Up to 255 letters (uppercase and lowercase), numbers, underscores, and hyphens are allowed. Service names must be unique within a cluster. - /// However, you can have similarly named services in multiple clusters within a Region or across multiple - /// Regions. + /// The name of your service. Up to 255 letters (uppercase and lowercase), numbers, underscores, and hyphens are allowed. Service names must be unique within + /// a cluster. However, you can have similarly named services in multiple clusters within a + /// Region or across multiple Regions. public let serviceName: String? - /// The details for the service discovery registries to assign to this service. For more information, see - /// Service + /// The details for the service discovery registries to assign to this service. For more + /// information, see Service /// Discovery. public let serviceRegistries: [ServiceRegistry]? - /// The status of the service. The valid values are ACTIVE, DRAINING, or - /// INACTIVE. + /// The status of the service. The valid values are ACTIVE, + /// DRAINING, or INACTIVE. public let status: String? - /// The metadata that you apply to the service to help you categorize and organize them. Each tag - /// consists of a key and an optional value. You define bot the key and value. The following basic restrictions apply to tags: Maximum number of tags per resource - 50 For each resource, each tag key must be unique, and each tag key can have only one value. 
Maximum key length - 128 Unicode characters in UTF-8 Maximum value length - 256 Unicode characters in UTF-8 If your tagging schema is used across multiple services and resources, remember that other services may have restrictions on allowed characters. Generally allowed characters are: letters, numbers, and spaces representable in UTF-8, and the following characters: + - = . _ : / @. Tag keys and values are case-sensitive. Do not use aws:, AWS:, or any upper or lowercase combination of such as a prefix for either keys or values as it is reserved for Amazon Web Services use. You cannot edit or delete tag keys or values with this prefix. Tags with this prefix do not count against your tags per resource limit. + /// The metadata that you apply to the service to help you categorize and organize them. + /// Each tag consists of a key and an optional value. You define both the key and + /// value. The following basic restrictions apply to tags: Maximum number of tags per resource - 50 For each resource, each tag key must be unique, and each tag key can have only one value. Maximum key length - 128 Unicode characters in UTF-8 Maximum value length - 256 Unicode characters in UTF-8 If your tagging schema is used across multiple services and resources, remember that other services may have restrictions on allowed characters. Generally allowed characters are: letters, numbers, and spaces representable in UTF-8, and the following characters: + - = . _ : / @. Tag keys and values are case-sensitive. Do not use aws:, AWS:, or any upper or lowercase combination of such as a prefix for either keys or values as it is reserved for Amazon Web Services use. You cannot edit or delete tag keys or values with this prefix. Tags with this prefix do not count against your tags per resource limit. public let tags: [Tag]? - /// The task definition to use for tasks in the service. 
This value is specified when the service is - /// created with CreateService, and it can be modified with UpdateService. + /// The task definition to use for tasks in the service. This value is specified when the + /// service is created with CreateService, + /// and it can be modified with UpdateService. public let taskDefinition: String? - /// Information about a set of Amazon ECS tasks in either an CodeDeploy or an EXTERNAL deployment. An - /// Amazon ECS task set includes details such as the desired number of tasks, how many tasks are running, and - /// whether the task set serves production traffic. + /// Information about a set of Amazon ECS tasks in either an CodeDeploy or an EXTERNAL + /// deployment. An Amazon ECS task set includes details such as the desired number of tasks, how + /// many tasks are running, and whether the task set serves production traffic. public let taskSets: [TaskSet]? @inlinable @@ -6254,17 +6584,18 @@ extension ECS { } public struct ServiceConnectClientAlias: AWSEncodableShape & AWSDecodableShape { - /// The dnsName is the name that you use in the applications of client tasks to connect to - /// this service. The name must be a valid DNS name but doesn't need to be fully-qualified. The name can - /// include up to 127 characters. The name can include lowercase letters, numbers, underscores (_), hyphens - /// (-), and periods (.). The name can't start with a hyphen. If this parameter isn't specified, the default value of discoveryName.namespace is used. If the discoveryName isn't specified, the port mapping name from the task definition is used in portName.namespace. To avoid changing your applications in client Amazon ECS services, set this to the same name that the - /// client application uses by default. For example, a few common names are database, - /// db, or the lowercase name of a database, such as mysql or - /// redis. For more information, see Service Connect in the Amazon Elastic Container Service Developer Guide. 
+ /// The dnsName is the name that you use in the applications of client tasks + /// to connect to this service. The name must be a valid DNS name but doesn't need to be + /// fully-qualified. The name can include up to 127 characters. The name can include + /// lowercase letters, numbers, underscores (_), hyphens (-), and periods (.). The name + /// can't start with a hyphen. If this parameter isn't specified, the default value of discoveryName.namespace is used. If the discoveryName isn't specified, the port mapping name from the task definition is used in portName.namespace. To avoid changing your applications in client Amazon ECS services, set this to the same + /// name that the client application uses by default. For example, a few common names are + /// database, db, or the lowercase name of a database, such as + /// mysql or redis. For more information, see Service Connect in the Amazon Elastic Container Service Developer Guide. public let dnsName: String? - /// The listening port number for the Service Connect proxy. This port is available inside of all of the - /// tasks within the same namespace. To avoid changing your applications in client Amazon ECS services, set this to the same port that the - /// client application uses by default. For more information, see Service Connect in the Amazon Elastic Container Service Developer Guide. + /// The listening port number for the Service Connect proxy. This port is available + /// inside of all of the tasks within the same namespace. To avoid changing your applications in client Amazon ECS services, set this to the same + /// port that the client application uses by default. For more information, see Service Connect in the Amazon Elastic Container Service Developer Guide. public let port: Int @inlinable @@ -6288,17 +6619,19 @@ extension ECS { /// Specifies whether to use Service Connect with this service. public let enabled: Bool public let logConfiguration: LogConfiguration? 
- /// The namespace name or full Amazon Resource Name (ARN) of the Cloud Map namespace for use with Service Connect. The namespace must be in the same Amazon Web Services - /// Region as the Amazon ECS service and cluster. The type of namespace doesn't affect Service Connect. For - /// more information about Cloud Map, see Working with Services in the - /// Cloud Map Developer Guide. + /// The namespace name or full Amazon Resource Name (ARN) of the Cloud Map namespace for use with Service Connect. The namespace must be in + /// the same Amazon Web Services Region as the Amazon ECS service and cluster. The type of namespace doesn't + /// affect Service Connect. For more information about Cloud Map, see Working + /// with Services in the Cloud Map Developer Guide. public let namespace: String? - /// The list of Service Connect service objects. These are names and aliases (also known as endpoints) - /// that are used by other Amazon ECS services to connect to this service. This field is not required for a "client" Amazon ECS service that's a member of a namespace only to - /// connect to other services within the namespace. An example of this would be a frontend application that - /// accepts incoming requests from either a load balancer that's attached to the service or by other - /// means. An object selects a port from the task definition, assigns a name for the Cloud Map service, and a - /// list of aliases (endpoints) and ports for client applications to refer to this service. + /// The list of Service Connect service objects. These are names and aliases (also known + /// as endpoints) that are used by other Amazon ECS services to connect to this service. + /// This field is not required for a "client" Amazon ECS service that's a member of a namespace + /// only to connect to other services within the namespace. 
An example of this would be a + /// frontend application that accepts incoming requests from either a load balancer that's + /// attached to the service or by other means. An object selects a port from the task definition, assigns a name for the Cloud Map + /// service, and a list of aliases (endpoints) and ports for client applications to refer to + /// this service. public let services: [ServiceConnectService]? @inlinable @@ -6324,28 +6657,32 @@ extension ECS { } public struct ServiceConnectService: AWSEncodableShape & AWSDecodableShape { - /// The list of client aliases for this Service Connect service. You use these to assign names that can - /// be used by client applications. The maximum number of client aliases that you can have in this list is - /// 1. Each alias ("endpoint") is a fully-qualified name and port number that other Amazon ECS tasks ("clients") - /// can use to connect to this service. Each name and port mapping must be unique within the namespace. For each ServiceConnectService, you must provide at least one clientAlias - /// with one port. + /// The list of client aliases for this Service Connect service. You use these to assign + /// names that can be used by client applications. The maximum number of client aliases that + /// you can have in this list is 1. Each alias ("endpoint") is a fully-qualified name and port number that other Amazon ECS + /// tasks ("clients") can use to connect to this service. Each name and port mapping must be unique within the namespace. For each ServiceConnectService, you must provide at least one + /// clientAlias with one port. public let clientAliases: [ServiceConnectClientAlias]? /// The discoveryName is the name of the new Cloud Map service that Amazon ECS creates /// for this Amazon ECS service. This must be unique within the Cloud Map namespace. The name can contain up to 64 characters. The name can include lowercase letters, /// numbers, underscores (_), and hyphens (-). 
The name can't start with a hyphen. If the discoveryName isn't specified, the port mapping name from the task definition is used in portName.namespace. public let discoveryName: String? - /// The port number for the Service Connect proxy to listen on. Use the value of this field to bypass the proxy for traffic on the port number specified in the named - /// portMapping in the task definition of this application, and then use it in your VPC - /// security groups to allow traffic into the proxy for this Amazon ECS service. In awsvpc mode and Fargate, the default value is the container port number. The - /// container port number is in the portMapping in the task definition. In bridge mode, the - /// default value is the ephemeral port of the Service Connect proxy. + /// The port number for the Service Connect proxy to listen on. Use the value of this field to bypass the proxy for traffic on the port number + /// specified in the named portMapping in the task definition of this + /// application, and then use it in your VPC security groups to allow traffic into the proxy + /// for this Amazon ECS service. In awsvpc mode and Fargate, the default value is the container port + /// number. The container port number is in the portMapping in the task + /// definition. In bridge mode, the default value is the ephemeral port of the + /// Service Connect proxy. public let ingressPortOverride: Int? - /// The portName must match the name of one of the portMappings from all the - /// containers in the task definition of this Amazon ECS service. + /// The portName must match the name of one of the portMappings + /// from all the containers in the task definition of this Amazon ECS service. public let portName: String - /// A reference to an object that represents the configured timeouts for Service Connect. + /// A reference to an object that represents the configured timeouts for + /// Service Connect. public let timeout: TimeoutConfiguration? 
- /// A reference to an object that represents a Transport Layer Security (TLS) configuration. + /// A reference to an object that represents a Transport Layer Security (TLS) + /// configuration. public let tls: ServiceConnectTlsConfiguration? @inlinable @@ -6379,8 +6716,8 @@ extension ECS { public struct ServiceConnectServiceResource: AWSDecodableShape { /// The Amazon Resource Name (ARN) for the namespace in Cloud Map that matches the discovery name for this - /// Service Connect resource. You can use this ARN in other integrations with Cloud Map. However, - /// Service Connect can't ensure connectivity outside of Amazon ECS. + /// Service Connect resource. You can use this ARN in other integrations with Cloud Map. + /// However, Service Connect can't ensure connectivity outside of Amazon ECS. public let discoveryArn: String? /// The discovery name of this Service Connect resource. The discoveryName is the name of the new Cloud Map service that Amazon ECS creates /// for this Amazon ECS service. This must be unique within the Cloud Map namespace. The name can contain up to 64 characters. The name can include lowercase letters, @@ -6440,12 +6777,14 @@ extension ECS { public let alarms: ServiceDeploymentAlarms? /// The ARN of the cluster that hosts the service. public let clusterArn: String? - /// The time the service deployment was created. The format is yyyy-MM-dd HH:mm:ss.SSSSSS. + /// The time the service deployment was created. The format is yyyy-MM-dd + /// HH:mm:ss.SSSSSS. public let createdAt: Date? /// The circuit breaker configuration that determines a service deployment failed. public let deploymentCircuitBreaker: ServiceDeploymentCircuitBreaker? public let deploymentConfiguration: DeploymentConfiguration? - /// The time the service deployment finished. The format is yyyy-MM-dd HH:mm:ss.SSSSSS. + /// The time the service deployment finished. The format is yyyy-MM-dd + /// HH:mm:ss.SSSSSS. public let finishedAt: Date? 
/// The rollback options the service deployment uses when the deployment fails. public let rollback: Rollback? @@ -6455,13 +6794,16 @@ extension ECS { public let serviceDeploymentArn: String? /// The currently deployed workload configuration. public let sourceServiceRevisions: [ServiceRevisionSummary]? - /// The time the service deployment statred. The format is yyyy-MM-dd HH:mm:ss.SSSSSS. + /// The time the service deployment statred. The format is yyyy-MM-dd + /// HH:mm:ss.SSSSSS. public let startedAt: Date? /// The service deployment state. public let status: ServiceDeploymentStatus? - /// Information about why the service deployment is in the current status. For example, the circuit breaker detected a failure. + /// Information about why the service deployment is in the current status. For example, + /// the circuit breaker detected a failure. public let statusReason: String? - /// The time the service deployment stopped. The format is yyyy-MM-dd HH:mm:ss.SSSSSS. The service deployment stops when any of the following actions happen: A user manually stops the deployment The rollback option is not in use for the failure detection mechanism (the + /// The time the service deployment stopped. The format is yyyy-MM-dd + /// HH:mm:ss.SSSSSS. The service deployment stops when any of the following actions happen: A user manually stops the deployment The rollback option is not in use for the failure detection mechanism (the /// circuit breaker or alarm-based) and the service fails. public let stoppedAt: Date? /// The workload configuration being deployed. @@ -6511,12 +6853,14 @@ extension ECS { } public struct ServiceDeploymentAlarms: AWSDecodableShape { - /// The name of the CloudWatch alarms that determine when a service deployment failed. A "," separates the alarms. + /// The name of the CloudWatch alarms that determine when a service deployment failed. A + /// "," separates the alarms. public let alarmNames: [String]? - /// The status of the alarms check. 
Amazon ECS is not using alarms for service deployment failures when the status is DISABLED. + /// The status of the alarms check. Amazon ECS is not using alarms for service deployment + /// failures when the status is DISABLED. public let status: ServiceDeploymentRollbackMonitorsStatus? - /// One or more CloudWatch alarm names that have been triggered during the service deployment. A "," - /// separates the alarm names. + /// One or more CloudWatch alarm names that have been triggered during the service + /// deployment. A "," separates the alarm names. public let triggeredAlarmNames: [String]? @inlinable @@ -6551,7 +6895,8 @@ extension ECS { public let startedAt: Date? /// The status of the service deployment public let status: ServiceDeploymentStatus? - /// Information about why the service deployment is in the current status. For example, the circuit breaker detected a deployment failure. + /// Information about why the service deployment is in the current status. For example, + /// the circuit breaker detected a deployment failure. public let statusReason: String? /// The ARN of the service revision being deplyed. public let targetServiceRevisionArn: String? @@ -6585,12 +6930,13 @@ extension ECS { public struct ServiceDeploymentCircuitBreaker: AWSDecodableShape { /// The number of times the circuit breaker detected a service deploymeny failure. public let failureCount: Int? - /// The circuit breaker status. Amazon ECS is not using the circuit breaker for service deployment failures when the status is DISABLED. + /// The circuit breaker status. Amazon ECS is not using the circuit breaker for service + /// deployment failures when the status is DISABLED. public let status: ServiceDeploymentRollbackMonitorsStatus? - /// The threshhold which determines that the service deployment failed. The deployment circuit breaker calculates the threshold value, and then uses the value to - /// determine when to move the deployment to a FAILED state. 
The deployment circuit breaker - /// has a minimum threshold of 3 and a maximum threshold of 200. and uses the values in the - /// following formula to determine the deployment failure. 0.5 * desired task count + /// The threshhold which determines that the service deployment failed. The deployment circuit breaker calculates the threshold value, and then uses the value + /// to determine when to move the deployment to a FAILED state. The deployment circuit + /// breaker has a minimum threshold of 3 and a maximum threshold of 200. and uses the values + /// in the following formula to determine the deployment failure. 0.5 * desired task count public let threshold: Int? @inlinable @@ -6630,57 +6976,61 @@ extension ECS { } public struct ServiceManagedEBSVolumeConfiguration: AWSEncodableShape & AWSDecodableShape { - /// Indicates whether the volume should be encrypted. If no value is specified, encryption is turned on - /// by default. This parameter maps 1:1 with the Encrypted parameter of the CreateVolume - /// API in the Amazon EC2 API Reference. + /// Indicates whether the volume should be encrypted. If no value is specified, encryption + /// is turned on by default. This parameter maps 1:1 with the Encrypted + /// parameter of the CreateVolume API in + /// the Amazon EC2 API Reference. public let encrypted: Bool? - /// The filesystem type for the volume. For volumes created from a snapshot, you must specify - /// the same filesystem type that the volume was using when the snapshot was created. If - /// there is a filesystem type mismatch, the task will fail to start. The available Linux filesystem types are
 ext3, ext4, and + /// The filesystem type for the volume. For volumes created from a snapshot, you must + /// specify the same filesystem type that the volume was using when the snapshot was + /// created. If there is a filesystem type mismatch, the task will fail to start. The available Linux filesystem types are
 ext3, ext4, and /// xfs. If no value is specified, the xfs filesystem type is /// used by default. The available Windows filesystem types are NTFS. public let filesystemType: TaskFilesystemType? - /// The number of I/O operations per second (IOPS). For gp3, io1, and - /// io2 volumes, this represents the number of IOPS that are provisioned for the volume. - /// For gp2 volumes, this represents the baseline performance of the volume and the rate at - /// which the volume accumulates I/O credits for bursting. The following are the supported values for each volume type. gp3: 3,000 - 16,000 IOPS io1: 100 - 64,000 IOPS io2: 100 - 256,000 IOPS This parameter is required for io1 and io2 volume types. The default for - /// gp3 volumes is 3,000 IOPS. This parameter is not supported for - /// st1, sc1, or standard volume types. This parameter maps 1:1 with the Iops parameter of the CreateVolume API in the - /// Amazon EC2 API Reference. + /// The number of I/O operations per second (IOPS). For gp3, + /// io1, and io2 volumes, this represents the number of IOPS that + /// are provisioned for the volume. For gp2 volumes, this represents the + /// baseline performance of the volume and the rate at which the volume accumulates I/O + /// credits for bursting. The following are the supported values for each volume type. gp3: 3,000 - 16,000 IOPS io1: 100 - 64,000 IOPS io2: 100 - 256,000 IOPS This parameter is required for io1 and io2 volume types. The + /// default for gp3 volumes is 3,000 IOPS. This parameter is not + /// supported for st1, sc1, or standard volume + /// types. This parameter maps 1:1 with the Iops parameter of the CreateVolume API in the Amazon EC2 API Reference. public let iops: Int? - /// The Amazon Resource Name (ARN) identifier of the Amazon Web Services Key Management Service key to use for Amazon EBS encryption. 
When encryption is turned - /// on and no Amazon Web Services Key Management Service key is specified, the default Amazon Web Services managed key for Amazon EBS volumes is used. This - /// parameter maps 1:1 with the KmsKeyId parameter of the CreateVolume API in the - /// Amazon EC2 API Reference. Amazon Web Services authenticates the Amazon Web Services Key Management Service key asynchronously. Therefore, if you specify an ID, alias, or - /// ARN that is invalid, the action can appear to complete, but eventually fails. + /// The Amazon Resource Name (ARN) identifier of the Amazon Web Services Key Management Service key to use for Amazon EBS encryption. When + /// encryption is turned on and no Amazon Web Services Key Management Service key is specified, the default Amazon Web Services managed key + /// for Amazon EBS volumes is used. This parameter maps 1:1 with the KmsKeyId + /// parameter of the CreateVolume API in + /// the Amazon EC2 API Reference. Amazon Web Services authenticates the Amazon Web Services Key Management Service key asynchronously. Therefore, if you specify an + /// ID, alias, or ARN that is invalid, the action can appear to complete, but + /// eventually fails. public let kmsKeyId: String? - /// The ARN of the IAM role to associate with this volume. This is the Amazon ECS infrastructure IAM role - /// that is used to manage your Amazon Web Services infrastructure. We recommend using the Amazon ECS-managed - /// AmazonECSInfrastructureRolePolicyForVolumes IAM policy with this role. For more - /// information, see Amazon ECS infrastructure IAM - /// role in the Amazon ECS Developer Guide. + /// The ARN of the IAM role to associate with this volume. This is the Amazon ECS + /// infrastructure IAM role that is used to manage your Amazon Web Services infrastructure. We recommend + /// using the Amazon ECS-managed AmazonECSInfrastructureRolePolicyForVolumes IAM + /// policy with this role. 
For more information, see Amazon ECS + /// infrastructure IAM role in the Amazon ECS Developer + /// Guide. public let roleArn: String - /// The size of the volume in GiB. You must specify either a volume size or a snapshot ID. If you specify - /// a snapshot ID, the snapshot size is used for the volume size by default. You can optionally specify a - /// volume size greater than or equal to the snapshot size. This parameter maps 1:1 with the - /// Size parameter of the CreateVolume API in the - /// Amazon EC2 API Reference. The following are the supported volume size values for each volume type. gp2 and gp3: 1-16,384 io1 and io2: 4-16,384 st1 and sc1: 125-16,384 standard: 1-1,024 + /// The size of the volume in GiB. You must specify either a volume size or a snapshot ID. + /// If you specify a snapshot ID, the snapshot size is used for the volume size by default. + /// You can optionally specify a volume size greater than or equal to the snapshot size. + /// This parameter maps 1:1 with the Size parameter of the CreateVolume API in the Amazon EC2 API Reference. The following are the supported volume size values for each volume type. gp2 and gp3: 1-16,384 io1 and io2: 4-16,384 st1 and sc1: 125-16,384 standard: 1-1,024 public let sizeInGiB: Int? - /// The snapshot that Amazon ECS uses to create the volume. You must specify either a snapshot ID or a volume - /// size. This parameter maps 1:1 with the SnapshotId parameter of the CreateVolume - /// API in the Amazon EC2 API Reference. + /// The snapshot that Amazon ECS uses to create the volume. You must specify either a snapshot + /// ID or a volume size. This parameter maps 1:1 with the SnapshotId parameter + /// of the CreateVolume API in + /// the Amazon EC2 API Reference. public let snapshotId: String? - /// The tags to apply to the volume. Amazon ECS applies service-managed tags by default. 
This parameter maps - /// 1:1 with the TagSpecifications.N parameter of the CreateVolume API in the - /// Amazon EC2 API Reference. + /// The tags to apply to the volume. Amazon ECS applies service-managed tags by default. This + /// parameter maps 1:1 with the TagSpecifications.N parameter of the CreateVolume API in the Amazon EC2 API Reference. public let tagSpecifications: [EBSTagSpecification]? - /// The throughput to provision for a volume, in MiB/s, with a maximum of 1,000 MiB/s. This parameter - /// maps 1:1 with the Throughput parameter of the CreateVolume API in the - /// Amazon EC2 API Reference. This parameter is only supported for the gp3 volume type. + /// The throughput to provision for a volume, in MiB/s, with a maximum of 1,000 MiB/s. + /// This parameter maps 1:1 with the Throughput parameter of the CreateVolume API in the Amazon EC2 API Reference. This parameter is only supported for the gp3 volume type. public let throughput: Int? - /// The volume type. This parameter maps 1:1 with the VolumeType parameter of the CreateVolume - /// API in the Amazon EC2 API Reference. For more information, see Amazon EBS volume types - /// in the Amazon EC2 User Guide. The following are the supported volume types. General Purpose SSD: gp2|gp3 Provisioned IOPS SSD: io1|io2 Throughput Optimized HDD: st1 Cold HDD: sc1 Magnetic: standard The magnetic volume type is not supported on Fargate. + /// The volume type. This parameter maps 1:1 with the VolumeType parameter of + /// the CreateVolume API in the Amazon EC2 API Reference. For more + /// information, see Amazon EBS volume types in + /// the Amazon EC2 User Guide. The following are the supported volume types. General Purpose SSD: gp2|gp3 Provisioned IOPS SSD: io1|io2 Throughput Optimized HDD: st1 Cold HDD: sc1 Magnetic: standard The magnetic volume type is not supported on Fargate. public let volumeType: String? 
@inlinable @@ -6718,27 +7068,30 @@ extension ECS { } public struct ServiceRegistry: AWSEncodableShape & AWSDecodableShape { - /// The container name value to be used for your service discovery service. It's already specified in the - /// task definition. If the task definition that your service task specifies uses the bridge - /// or host network mode, you must specify a containerName and - /// containerPort combination from the task definition. If the task definition that your - /// service task specifies uses the awsvpc network mode and a type SRV DNS record is used, you - /// must specify either a containerName and containerPort combination or a + /// The container name value to be used for your service discovery service. It's already + /// specified in the task definition. If the task definition that your service task + /// specifies uses the bridge or host network mode, you must + /// specify a containerName and containerPort combination from the + /// task definition. If the task definition that your service task specifies uses the + /// awsvpc network mode and a type SRV DNS record is used, you must specify + /// either a containerName and containerPort combination or a /// port value. However, you can't specify both. public let containerName: String? - /// The port value to be used for your service discovery service. It's already specified in the task - /// definition. If the task definition your service task specifies uses the bridge or - /// host network mode, you must specify a containerName and - /// containerPort combination from the task definition. If the task definition your - /// service task specifies uses the awsvpc network mode and a type SRV DNS record is used, you - /// must specify either a containerName and containerPort combination or a + /// The port value to be used for your service discovery service. It's already specified + /// in the task definition. 
If the task definition your service task specifies uses the + /// bridge or host network mode, you must specify a + /// containerName and containerPort combination from the task + /// definition. If the task definition your service task specifies uses the + /// awsvpc network mode and a type SRV DNS record is used, you must specify + /// either a containerName and containerPort combination or a /// port value. However, you can't specify both. public let containerPort: Int? - /// The port value used if your service discovery service specified an SRV record. This field might be - /// used if both the awsvpc network mode and SRV records are used. + /// The port value used if your service discovery service specified an SRV record. This + /// field might be used if both the awsvpc network mode and SRV records are + /// used. public let port: Int? - /// The Amazon Resource Name (ARN) of the service registry. The currently supported service registry is Cloud Map. For more - /// information, see CreateService. + /// The Amazon Resource Name (ARN) of the service registry. The currently supported service registry is + /// Cloud Map. For more information, see CreateService. public let registryArn: String? @inlinable @@ -6764,7 +7117,8 @@ extension ECS { public let clusterArn: String? /// The container images the service revision uses. public let containerImages: [ContainerImage]? - /// The time that the service revision was created. The format is yyyy-mm-dd HH:mm:ss.SSSSS. + /// The time that the service revision was created. The format is yyyy-mm-dd + /// HH:mm:ss.SSSSS. public let createdAt: Date? public let fargateEphemeralStorage: DeploymentEphemeralStorage? /// Indicates whether Runtime Monitoring is turned on. @@ -6863,12 +7217,13 @@ extension ECS { } public struct ServiceVolumeConfiguration: AWSEncodableShape & AWSDecodableShape { - /// The configuration for the Amazon EBS volume that Amazon ECS creates and manages on your behalf. 
These settings - /// are used to create each Amazon EBS volume, with one volume created for each task in the service. The Amazon EBS - /// volumes are visible in your account in the Amazon EC2 console once they are created. + /// The configuration for the Amazon EBS volume that Amazon ECS creates and manages on your behalf. + /// These settings are used to create each Amazon EBS volume, with one volume created for each + /// task in the service. The Amazon EBS volumes are visible in your account in the Amazon EC2 console + /// once they are created. public let managedEBSVolume: ServiceManagedEBSVolumeConfiguration? - /// The name of the volume. This value must match the volume name from the Volume object in - /// the task definition. + /// The name of the volume. This value must match the volume name from the + /// Volume object in the task definition. public let name: String @inlinable @@ -6890,11 +7245,11 @@ extension ECS { public struct Session: AWSDecodableShape { /// The ID of the execute command session. public let sessionId: String? - /// A URL to the managed agent on the container that the SSM Session Manager client uses to send commands - /// and receive output from the container. + /// A URL to the managed agent on the container that the SSM Session Manager client uses + /// to send commands and receive output from the container. public let streamUrl: String? - /// An encrypted token value containing session and caller information. It's used to authenticate the - /// connection to the container. + /// An encrypted token value containing session and caller information. It's used to + /// authenticate the connection to the container. public let tokenValue: String? @inlinable @@ -6914,12 +7269,12 @@ extension ECS { public struct Setting: AWSDecodableShape { /// The Amazon ECS resource name. public let name: SettingName? - /// The ARN of the principal. It can be a user, role, or the root user. If this field is omitted, the - /// authenticated user is assumed. 
+ /// The ARN of the principal. It can be a user, role, or the root user. If this field is + /// omitted, the authenticated user is assumed. public let principalArn: String? - /// Indicates whether Amazon Web Services manages the account setting, or if the user manages it. aws_managed account settings are read-only, as Amazon Web Services manages such on the customer's - /// behalf. Currently, the guardDutyActivate account setting is the only one Amazon Web Services - /// manages. + /// Indicates whether Amazon Web Services manages the account setting, or if the user manages it. aws_managed account settings are read-only, as Amazon Web Services manages such on the + /// customer's behalf. Currently, the guardDutyActivate account setting is the + /// only one Amazon Web Services manages. public let type: SettingType? /// Determines whether the account setting is on or off for the specified resource. public let value: String? @@ -6944,51 +7299,53 @@ extension ECS { /// The short name or full Amazon Resource Name (ARN) of the cluster where to start your task. /// If you do not specify a cluster, the default cluster is assumed. public let cluster: String? - /// The container instance IDs or full ARN entries for the container instances where you would like to - /// place your task. You can specify up to 10 container instances. + /// The container instance IDs or full ARN entries for the container instances where you + /// would like to place your task. You can specify up to 10 container instances. public let containerInstances: [String] - /// Specifies whether to use Amazon ECS managed tags for the task. For more information, see Tagging Your Amazon ECS + /// Specifies whether to use Amazon ECS managed tags for the task. For more information, see + /// Tagging Your Amazon ECS /// Resources in the Amazon Elastic Container Service Developer Guide. public let enableECSManagedTags: Bool? - /// Whether or not the execute command functionality is turned on for the task. 
If true, - /// this turns on the execute command functionality on all containers in the task. + /// Whether or not the execute command functionality is turned on for the task. If + /// true, this turns on the execute command functionality on all containers + /// in the task. public let enableExecuteCommand: Bool? - /// The name of the task group to associate with the task. The default value is the family name of the - /// task definition (for example, family:my-family-name). + /// The name of the task group to associate with the task. The default value is the family + /// name of the task definition (for example, family:my-family-name). public let group: String? /// The VPC subnet and security group configuration for tasks that receive their own elastic network interface by using the awsvpc networking mode. public let networkConfiguration: NetworkConfiguration? - /// A list of container overrides in JSON format that specify the name of a container in the specified - /// task definition and the overrides it receives. You can override the default command for a container - /// (that's specified in the task definition or Docker image) with a command override. You can - /// also override existing environment variables (that are specified in the task definition or Docker - /// image) on a container or add new environment variables to it with an environment - /// override. A total of 8192 characters are allowed for overrides. This limit includes the JSON formatting - /// characters of the override structure. + /// A list of container overrides in JSON format that specify the name of a container in + /// the specified task definition and the overrides it receives. You can override the + /// default command for a container (that's specified in the task definition or Docker + /// image) with a command override. 
You can also override existing environment + /// variables (that are specified in the task definition or Docker image) on a container or + /// add new environment variables to it with an environment override. A total of 8192 characters are allowed for overrides. This limit includes the JSON + /// formatting characters of the override structure. public let overrides: TaskOverride? - /// Specifies whether to propagate the tags from the task definition or the service to the task. If no - /// value is specified, the tags aren't propagated. + /// Specifies whether to propagate the tags from the task definition or the service to the + /// task. If no value is specified, the tags aren't propagated. public let propagateTags: PropagateTags? /// This parameter is only used by Amazon ECS. It is not intended for use by customers. public let referenceId: String? - /// An optional tag specified when a task is started. For example, if you automatically trigger - /// a task to run a batch process job, you could apply a unique identifier for that job to - /// your task with the startedBy parameter. You can then identify which tasks - /// belong to that job by filtering the results of a ListTasks call with + /// An optional tag specified when a task is started. For example, if you automatically + /// trigger a task to run a batch process job, you could apply a unique identifier for that + /// job to your task with the startedBy parameter. You can then identify which + /// tasks belong to that job by filtering the results of a ListTasks call with /// the startedBy value. Up to 36 letters (uppercase and lowercase), numbers, /// hyphens (-), forward slash (/), and underscores (_) are allowed. If a task is started by an Amazon ECS service, the startedBy parameter /// contains the deployment ID of the service that starts it. public let startedBy: String? - /// The metadata that you apply to the task to help you categorize and organize them. 
Each tag consists - /// of a key and an optional value, both of which you define. The following basic restrictions apply to tags: Maximum number of tags per resource - 50 For each resource, each tag key must be unique, and each tag key can have only one value. Maximum key length - 128 Unicode characters in UTF-8 Maximum value length - 256 Unicode characters in UTF-8 If your tagging schema is used across multiple services and resources, remember that other services may have restrictions on allowed characters. Generally allowed characters are: letters, numbers, and spaces representable in UTF-8, and the following characters: + - = . _ : / @. Tag keys and values are case-sensitive. Do not use aws:, AWS:, or any upper or lowercase combination of such as a prefix for either keys or values as it is reserved for Amazon Web Services use. You cannot edit or delete tag keys or values with this prefix. Tags with this prefix do not count against your tags per resource limit. + /// The metadata that you apply to the task to help you categorize and organize them. Each + /// tag consists of a key and an optional value, both of which you define. The following basic restrictions apply to tags: Maximum number of tags per resource - 50 For each resource, each tag key must be unique, and each tag key can have only one value. Maximum key length - 128 Unicode characters in UTF-8 Maximum value length - 256 Unicode characters in UTF-8 If your tagging schema is used across multiple services and resources, remember that other services may have restrictions on allowed characters. Generally allowed characters are: letters, numbers, and spaces representable in UTF-8, and the following characters: + - = . _ : / @. Tag keys and values are case-sensitive. Do not use aws:, AWS:, or any upper or lowercase combination of such as a prefix for either keys or values as it is reserved for Amazon Web Services use. You cannot edit or delete tag keys or values with this prefix. 
Tags with this prefix do not count against your tags per resource limit. public let tags: [Tag]? - /// The family and revision (family:revision) or full ARN of the - /// task definition to start. If a revision isn't specified, the latest ACTIVE - /// revision is used. + /// The family and revision (family:revision) or + /// full ARN of the task definition to start. If a revision isn't specified, + /// the latest ACTIVE revision is used. public let taskDefinition: String - /// The details of the volume that was configuredAtLaunch. You can configure the size, - /// volumeType, IOPS, throughput, snapshot and encryption in TaskManagedEBSVolumeConfiguration. The name of the volume must match the - /// name from the task definition. + /// The details of the volume that was configuredAtLaunch. You can configure + /// the size, volumeType, IOPS, throughput, snapshot and encryption in TaskManagedEBSVolumeConfiguration. The name of the volume must + /// match the name from the task definition. public let volumeConfigurations: [TaskVolumeConfiguration]? @inlinable @@ -7038,8 +7395,8 @@ extension ECS { public struct StartTaskResponse: AWSDecodableShape { /// Any failures associated with the call. public let failures: [Failure]? - /// A full description of the tasks that were started. Each task that was successfully placed on your - /// container instances is described. + /// A full description of the tasks that were started. Each task that was successfully + /// placed on your container instances is described. public let tasks: [Task]? @inlinable @@ -7058,10 +7415,10 @@ extension ECS { /// The short name or full Amazon Resource Name (ARN) of the cluster that hosts the task to stop. /// If you do not specify a cluster, the default cluster is assumed. public let cluster: String? - /// An optional message specified when a task is stopped. 
For example, if you're using a custom - /// scheduler, you can use this parameter to specify the reason for stopping the task here, and the message - /// appears in subsequent DescribeTasks> API operations on - /// this task. + /// An optional message specified when a task is stopped. For example, if you're using a + /// custom scheduler, you can use this parameter to specify the reason for stopping the task + /// here, and the message appears in subsequent DescribeTasks> + /// API operations on this task. public let reason: String? /// The task ID of the task to stop. public let task: String @@ -7097,8 +7454,8 @@ extension ECS { public struct SubmitAttachmentStateChangesRequest: AWSEncodableShape { /// Any attachments associated with the state change request. public let attachments: [AttachmentStateChange] - /// The short name or full ARN of the cluster that hosts the container instance the attachment belongs - /// to. + /// The short name or full ARN of the cluster that hosts the container instance the + /// attachment belongs to. public let cluster: String? @inlinable @@ -7250,9 +7607,11 @@ extension ECS { public struct SystemControl: AWSEncodableShape & AWSDecodableShape { /// The namespaced kernel parameter to set a value for. public let namespace: String? - /// The namespaced kernel parameter to set a value for. Valid IPC namespace values: "kernel.msgmax" | "kernel.msgmnb" | "kernel.msgmni" | "kernel.sem" - /// | "kernel.shmall" | "kernel.shmmax" | "kernel.shmmni" | "kernel.shm_rmid_forced", and - /// Sysctls that start with "fs.mqueue.*" Valid network namespace values: Sysctls that start with "net.*" All of these values are supported by Fargate. + /// The namespaced kernel parameter to set a value for. 
Valid IPC namespace values: "kernel.msgmax" | "kernel.msgmnb" | "kernel.msgmni" + /// | "kernel.sem" | "kernel.shmall" | "kernel.shmmax" | "kernel.shmmni" | + /// "kernel.shm_rmid_forced", and Sysctls that start with + /// "fs.mqueue.*" Valid network namespace values: Sysctls that start with + /// "net.*" All of these values are supported by Fargate. public let value: String? @inlinable @@ -7268,11 +7627,11 @@ extension ECS { } public struct Tag: AWSEncodableShape & AWSDecodableShape { - /// One part of a key-value pair that make up a tag. A key is a general label that acts like - /// a category for more specific tag values. + /// One part of a key-value pair that make up a tag. A key is a general label + /// that acts like a category for more specific tag values. public let key: String? - /// The optional part of a key-value pair that make up a tag. A value acts as a descriptor - /// within a tag category (key). + /// The optional part of a key-value pair that make up a tag. A value acts as + /// a descriptor within a tag category (key). public let value: String? @inlinable @@ -7296,8 +7655,9 @@ extension ECS { } public struct TagResourceRequest: AWSEncodableShape { - /// The Amazon Resource Name (ARN) of the resource to add tags to. Currently, the supported resources are Amazon ECS capacity - /// providers, tasks, services, task definitions, clusters, and container instances. + /// The Amazon Resource Name (ARN) of the resource to add tags to. Currently, the supported resources are + /// Amazon ECS capacity providers, tasks, services, task definitions, clusters, and container + /// instances. public let resourceArn: String /// The tags to add to the resource. A tag is an array of key-value pairs. The following basic restrictions apply to tags: Maximum number of tags per resource - 50 For each resource, each tag key must be unique, and each tag key can have only one value. 
Maximum key length - 128 Unicode characters in UTF-8 Maximum value length - 256 Unicode characters in UTF-8 If your tagging schema is used across multiple services and resources, remember that other services may have restrictions on allowed characters. Generally allowed characters are: letters, numbers, and spaces representable in UTF-8, and the following characters: + - = . _ : / @. Tag keys and values are case-sensitive. Do not use aws:, AWS:, or any upper or lowercase combination of such as a prefix for either keys or values as it is reserved for Amazon Web Services use. You cannot edit or delete tag keys or values with this prefix. Tags with this prefix do not count against your tags per resource limit. public let tags: [Tag] @@ -7326,8 +7686,8 @@ extension ECS { } public struct Task: AWSDecodableShape { - /// The Elastic Network Adapter that's associated with the task if the task uses the awsvpc - /// network mode. + /// The Elastic Network Adapter that's associated with the task if the task uses the + /// awsvpc network mode. public let attachments: [Attachment]? /// The attributes of the task public let attributes: [Attribute]? @@ -7339,29 +7699,33 @@ extension ECS { public let clusterArn: String? /// The connectivity status of a task. public let connectivity: Connectivity? - /// The Unix timestamp for the time when the task last went into CONNECTED status. + /// The Unix timestamp for the time when the task last went into CONNECTED + /// status. public let connectivityAt: Date? /// The ARN of the container instances that host the task. public let containerInstanceArn: String? /// The containers that's associated with the task. public let containers: [Container]? - /// The number of CPU units used by the task as expressed in a task definition. It can be expressed as an - /// integer using CPU units (for example, 1024). It can also be expressed as a string using - /// vCPUs (for example, 1 vCPU or 1 vcpu). 
String values are converted to an - /// integer that indicates the CPU units when the task definition is registered. If you use the EC2 launch type, this field is optional. Supported values are between - /// 128 CPU units (0.125 vCPUs) and 10240 CPU units - /// (10 vCPUs). If you use the Fargate launch type, this field is required. You must use one of the - /// following values. These values determine the range of supported values for the memory - /// parameter: The CPU units cannot be less than 1 vCPU when you use Windows containers on + /// The number of CPU units used by the task as expressed in a task definition. It can be + /// expressed as an integer using CPU units (for example, 1024). It can also be + /// expressed as a string using vCPUs (for example, 1 vCPU or 1 + /// vcpu). String values are converted to an integer that indicates the CPU units + /// when the task definition is registered. If you use the EC2 launch type, this field is optional. Supported values + /// are between 128 CPU units (0.125 vCPUs) and 10240 + /// CPU units (10 vCPUs). If you use the Fargate launch type, this field is required. You must use + /// one of the following values. These values determine the range of supported values for + /// the memory parameter: The CPU units cannot be less than 1 vCPU when you use Windows containers on /// Fargate. 256 (.25 vCPU) - Available memory values: 512 (0.5 GB), 1024 (1 GB), 2048 (2 GB) 512 (.5 vCPU) - Available memory values: 1024 (1 GB), 2048 (2 GB), 3072 (3 GB), 4096 (4 GB) 1024 (1 vCPU) - Available memory values: 2048 (2 GB), 3072 (3 GB), 4096 (4 GB), 5120 (5 GB), 6144 (6 GB), 7168 (7 GB), 8192 (8 GB) 2048 (2 vCPU) - Available memory values: 4096 (4 GB) and 16384 (16 GB) in increments of 1024 (1 GB) 4096 (4 vCPU) - Available memory values: 8192 (8 GB) and 30720 (30 GB) in increments of 1024 (1 GB) 8192 (8 vCPU) - Available memory values: 16 GB and 60 GB in 4 GB increments This option requires Linux platform 1.4.0 or later. 
16384 (16vCPU) - Available memory values: 32GB and 120 GB in 8 GB increments This option requires Linux platform 1.4.0 or later. public let cpu: String? - /// The Unix timestamp for the time when the task was created. More specifically, it's for the time when - /// the task entered the PENDING state. + /// The Unix timestamp for the time when the task was created. More specifically, it's for + /// the time when the task entered the PENDING state. public let createdAt: Date? - /// The desired status of the task. For more information, see Task Lifecycle. + /// The desired status of the task. For more information, see Task + /// Lifecycle. public let desiredStatus: String? - /// Determines whether execute command functionality is turned on for this task. If true, - /// execute command functionality is turned on all the containers in the task. + /// Determines whether execute command functionality is turned on for this task. If + /// true, execute command functionality is turned on all the containers in + /// the task. public let enableExecuteCommand: Bool? /// The ephemeral storage settings for the task. public let ephemeralStorage: EphemeralStorage? @@ -7371,75 +7735,83 @@ extension ECS { public let fargateEphemeralStorage: TaskEphemeralStorage? /// The name of the task group that's associated with the task. public let group: String? - /// The health status for the task. It's determined by the health of the essential containers in the - /// task. If all essential containers in the task are reporting as HEALTHY, the task status - /// also reports as HEALTHY. If any essential containers in the task are reporting as - /// UNHEALTHY or UNKNOWN, the task status also reports as - /// UNHEALTHY or UNKNOWN. The Amazon ECS container agent doesn't monitor or report on Docker health checks that are embedded in - /// a container image and not specified in the container definition. For example, this includes those - /// specified in a parent image or from the image's Dockerfile. 
Health check parameters that are - /// specified in a container definition override any Docker health checks that are found in the - /// container image. + /// The health status for the task. It's determined by the health of the essential + /// containers in the task. If all essential containers in the task are reporting as + /// HEALTHY, the task status also reports as HEALTHY. If any + /// essential containers in the task are reporting as UNHEALTHY or + /// UNKNOWN, the task status also reports as UNHEALTHY or + /// UNKNOWN. The Amazon ECS container agent doesn't monitor or report on Docker health checks that + /// are embedded in a container image and not specified in the container definition. For + /// example, this includes those specified in a parent image or from the image's + /// Dockerfile. Health check parameters that are specified in a container definition + /// override any Docker health checks that are found in the container image. public let healthStatus: HealthStatus? /// The Elastic Inference accelerator that's associated with the task. public let inferenceAccelerators: [InferenceAccelerator]? - /// The last known status for the task. For more information, see Task Lifecycle. + /// The last known status for the task. For more information, see Task + /// Lifecycle. public let lastStatus: String? - /// The infrastructure where your task runs on. For more information, see Amazon ECS launch - /// types in the Amazon Elastic Container Service Developer Guide. + /// The infrastructure where your task runs on. For more information, see Amazon ECS + /// launch types in the Amazon Elastic Container Service Developer Guide. public let launchType: LaunchType? - /// The amount of memory (in MiB) that the task uses as expressed in a task definition. It can be - /// expressed as an integer using MiB (for example, 1024). 
If it's expressed as a string using - /// GB (for example, 1GB or 1 GB), it's converted to an integer indicating the - /// MiB when the task definition is registered. If you use the EC2 launch type, this field is optional. If you use the Fargate launch type, this field is required. You must use one of the - /// following values. The value that you choose determines the range of supported values for the - /// cpu parameter. 512 (0.5 GB), 1024 (1 GB), 2048 (2 GB) - Available cpu values: 256 (.25 vCPU) 1024 (1 GB), 2048 (2 GB), 3072 (3 GB), 4096 (4 GB) - Available cpu values: 512 (.5 vCPU) 2048 (2 GB), 3072 (3 GB), 4096 (4 GB), 5120 (5 GB), 6144 (6 GB), 7168 (7 GB), 8192 (8 GB) - Available cpu values: 1024 (1 vCPU) Between 4096 (4 GB) and 16384 (16 GB) in increments of 1024 (1 GB) - Available cpu values: 2048 (2 vCPU) Between 8192 (8 GB) and 30720 (30 GB) in increments of 1024 (1 GB) - Available cpu values: 4096 (4 vCPU) Between 16 GB and 60 GB in 4 GB increments - Available cpu values: 8192 (8 vCPU) This option requires Linux platform 1.4.0 or later. Between 32GB and 120 GB in 8 GB increments - Available cpu values: 16384 (16 vCPU) This option requires Linux platform 1.4.0 or later. + /// The amount of memory (in MiB) that the task uses as expressed in a task definition. It + /// can be expressed as an integer using MiB (for example, 1024). If it's + /// expressed as a string using GB (for example, 1GB or 1 GB), + /// it's converted to an integer indicating the MiB when the task definition is + /// registered. If you use the EC2 launch type, this field is optional. If you use the Fargate launch type, this field is required. You must use + /// one of the following values. The value that you choose determines the range of supported + /// values for the cpu parameter. 
512 (0.5 GB), 1024 (1 GB), 2048 (2 GB) - Available cpu values: 256 (.25 vCPU) 1024 (1 GB), 2048 (2 GB), 3072 (3 GB), 4096 (4 GB) - Available cpu values: 512 (.5 vCPU) 2048 (2 GB), 3072 (3 GB), 4096 (4 GB), 5120 (5 GB), 6144 (6 GB), 7168 (7 GB), 8192 (8 GB) - Available cpu values: 1024 (1 vCPU) Between 4096 (4 GB) and 16384 (16 GB) in increments of 1024 (1 GB) - Available cpu values: 2048 (2 vCPU) Between 8192 (8 GB) and 30720 (30 GB) in increments of 1024 (1 GB) - Available cpu values: 4096 (4 vCPU) Between 16 GB and 60 GB in 4 GB increments - Available cpu values: 8192 (8 vCPU) This option requires Linux platform 1.4.0 or later. Between 32GB and 120 GB in 8 GB increments - Available cpu values: 16384 (16 vCPU) This option requires Linux platform 1.4.0 or later. public let memory: String? /// One or more container overrides. public let overrides: TaskOverride? - /// The operating system that your tasks are running on. A platform family is specified only for tasks - /// that use the Fargate launch type. All tasks that run as part of this service must use the same platformFamily value as - /// the service (for example, LINUX.). + /// The operating system that your tasks are running on. A platform family is specified + /// only for tasks that use the Fargate launch type. All tasks that run as part of this service must use the same + /// platformFamily value as the service (for example, + /// LINUX.). public let platformFamily: String? - /// The platform version where your task runs on. A platform version is only specified for tasks that use - /// the Fargate launch type. If you didn't specify one, the LATEST platform - /// version is used. For more information, see Fargate Platform Versions in - /// the Amazon Elastic Container Service Developer Guide. + /// The platform version where your task runs on. A platform version is only specified for + /// tasks that use the Fargate launch type. If you didn't specify one, the + /// LATEST platform version is used. 
For more information, see Fargate Platform Versions in the Amazon Elastic Container Service Developer Guide. public let platformVersion: String? /// The Unix timestamp for the time when the container image pull began. public let pullStartedAt: Date? /// The Unix timestamp for the time when the container image pull completed. public let pullStoppedAt: Date? - /// The Unix timestamp for the time when the task started. More specifically, it's for the time when the - /// task transitioned from the PENDING state to the RUNNING state. + /// The Unix timestamp for the time when the task started. More specifically, it's for the + /// time when the task transitioned from the PENDING state to the + /// RUNNING state. public let startedAt: Date? /// The tag specified when a task is started. If an Amazon ECS service started the task, the /// startedBy parameter contains the deployment ID of that service. public let startedBy: String? - /// The stop code indicating why a task was stopped. The stoppedReason might contain - /// additional details. For more information about stop code, see Stopped tasks error - /// codes in the Amazon ECS Developer Guide. + /// The stop code indicating why a task was stopped. The stoppedReason might + /// contain additional details. For more information about stop code, see Stopped tasks + /// error codes in the Amazon ECS Developer Guide. public let stopCode: TaskStopCode? - /// The Unix timestamp for the time when the task was stopped. More specifically, it's for the time when - /// the task transitioned from the RUNNING state to the STOPPED state. + /// The Unix timestamp for the time when the task was stopped. More specifically, it's for + /// the time when the task transitioned from the RUNNING state to the + /// STOPPED state. public let stoppedAt: Date? /// The reason that the task was stopped. public let stoppedReason: String? - /// The Unix timestamp for the time when the task stops. 
More specifically, it's for the time when the - /// task transitions from the RUNNING state to STOPPING. + /// The Unix timestamp for the time when the task stops. More specifically, it's for the + /// time when the task transitions from the RUNNING state to + /// STOPPING. public let stoppingAt: Date? - /// The metadata that you apply to the task to help you categorize and organize the task. Each tag - /// consists of a key and an optional value. You define both the key and value. The following basic restrictions apply to tags: Maximum number of tags per resource - 50 For each resource, each tag key must be unique, and each tag key can have only one value. Maximum key length - 128 Unicode characters in UTF-8 Maximum value length - 256 Unicode characters in UTF-8 If your tagging schema is used across multiple services and resources, remember that other services may have restrictions on allowed characters. Generally allowed characters are: letters, numbers, and spaces representable in UTF-8, and the following characters: + - = . _ : / @. Tag keys and values are case-sensitive. Do not use aws:, AWS:, or any upper or lowercase combination of such as a prefix for either keys or values as it is reserved for Amazon Web Services use. You cannot edit or delete tag keys or values with this prefix. Tags with this prefix do not count against your tags per resource limit. + /// The metadata that you apply to the task to help you categorize and organize the task. + /// Each tag consists of a key and an optional value. You define both the key and + /// value. The following basic restrictions apply to tags: Maximum number of tags per resource - 50 For each resource, each tag key must be unique, and each tag key can have only one value. 
Maximum key length - 128 Unicode characters in UTF-8 Maximum value length - 256 Unicode characters in UTF-8 If your tagging schema is used across multiple services and resources, remember that other services may have restrictions on allowed characters. Generally allowed characters are: letters, numbers, and spaces representable in UTF-8, and the following characters: + - = . _ : / @. Tag keys and values are case-sensitive. Do not use aws:, AWS:, or any upper or lowercase combination of such as a prefix for either keys or values as it is reserved for Amazon Web Services use. You cannot edit or delete tag keys or values with this prefix. Tags with this prefix do not count against your tags per resource limit. public let tags: [Tag]? /// The Amazon Resource Name (ARN) of the task. public let taskArn: String? /// The ARN of the task definition that creates the task. public let taskDefinitionArn: String? - /// The version counter for the task. Every time a task experiences a change that starts a CloudWatch event, - /// the version counter is incremented. If you replicate your Amazon ECS task state with CloudWatch Events, you can - /// compare the version of a task reported by the Amazon ECS API actions with the version reported in CloudWatch - /// Events for the task (inside the detail object) to verify that the version in your event - /// stream is current. + /// The version counter for the task. Every time a task experiences a change that starts a + /// CloudWatch event, the version counter is incremented. If you replicate your Amazon ECS task state + /// with CloudWatch Events, you can compare the version of a task reported by the Amazon ECS API + /// actions with the version reported in CloudWatch Events for the task (inside the + /// detail object) to verify that the version in your event stream is + /// current. public let version: Int64? 
@inlinable @@ -7525,42 +7897,49 @@ extension ECS { } public struct TaskDefinition: AWSDecodableShape { - /// Amazon ECS validates the task definition parameters with those supported by the launch type. For more - /// information, see Amazon ECS launch types in the Amazon Elastic Container Service Developer Guide. + /// Amazon ECS validates the task definition parameters with those supported by the launch + /// type. For more information, see Amazon ECS launch types + /// in the Amazon Elastic Container Service Developer Guide. public let compatibilities: [Compatibility]? - /// A list of container definitions in JSON format that describe the different containers that make up - /// your task. For more information about container definition parameters and defaults, see Amazon ECS Task + /// A list of container definitions in JSON format that describe the different containers + /// that make up your task. For more information about container definition parameters and + /// defaults, see Amazon ECS Task /// Definitions in the Amazon Elastic Container Service Developer Guide. public let containerDefinitions: [ContainerDefinition]? - /// The number of cpu units used by the task. If you use the EC2 launch type, this field is - /// optional. Any value can be used. If you use the Fargate launch type, this field is required. You must - /// use one of the following values. The value that you choose determines your range of valid values for - /// the memory parameter. If you use the EC2 launch type, this field is optional. Supported values are between - /// 128 CPU units (0.125 vCPUs) and 10240 CPU units - /// (10 vCPUs). The CPU units cannot be less than 1 vCPU when you use Windows containers on + /// The number of cpu units used by the task. If you use the EC2 launch type, + /// this field is optional. Any value can be used. If you use the Fargate launch type, this + /// field is required. You must use one of the following values. 
The value that you choose + /// determines your range of valid values for the memory parameter. If you use the EC2 launch type, this field is optional. Supported values + /// are between 128 CPU units (0.125 vCPUs) and 10240 + /// CPU units (10 vCPUs). The CPU units cannot be less than 1 vCPU when you use Windows containers on /// Fargate. 256 (.25 vCPU) - Available memory values: 512 (0.5 GB), 1024 (1 GB), 2048 (2 GB) 512 (.5 vCPU) - Available memory values: 1024 (1 GB), 2048 (2 GB), 3072 (3 GB), 4096 (4 GB) 1024 (1 vCPU) - Available memory values: 2048 (2 GB), 3072 (3 GB), 4096 (4 GB), 5120 (5 GB), 6144 (6 GB), 7168 (7 GB), 8192 (8 GB) 2048 (2 vCPU) - Available memory values: 4096 (4 GB) and 16384 (16 GB) in increments of 1024 (1 GB) 4096 (4 vCPU) - Available memory values: 8192 (8 GB) and 30720 (30 GB) in increments of 1024 (1 GB) 8192 (8 vCPU) - Available memory values: 16 GB and 60 GB in 4 GB increments This option requires Linux platform 1.4.0 or later. 16384 (16vCPU) - Available memory values: 32GB and 120 GB in 8 GB increments This option requires Linux platform 1.4.0 or later. public let cpu: String? /// The Unix timestamp for the time when the task definition was deregistered. public let deregisteredAt: Date? + /// Enables fault injection and allows for fault injection requests to be accepted from the task's containers. + /// The default value is false. + public let enableFaultInjection: Bool? /// The ephemeral storage settings to use for tasks run with the task definition. public let ephemeralStorage: EphemeralStorage? /// The Amazon Resource Name (ARN) of the task execution role that grants the Amazon ECS container agent permission to make Amazon Web Services API calls on your behalf. For informationabout the required IAM roles for Amazon ECS, see IAM roles for Amazon ECS in the Amazon Elastic Container Service Developer Guide. public let executionRoleArn: String? - /// The name of a family that this task definition is registered to. 
Up to 255 characters are allowed. - /// Letters (both uppercase and lowercase letters), numbers, hyphens (-), and underscores (_) are - /// allowed. A family groups multiple versions of a task definition. Amazon ECS gives the first task definition that - /// you registered to a family a revision number of 1. Amazon ECS gives sequential revision numbers to each task - /// definition that you add. + /// The name of a family that this task definition is registered to. Up to 255 characters + /// are allowed. Letters (both uppercase and lowercase letters), numbers, hyphens (-), and + /// underscores (_) are allowed. A family groups multiple versions of a task definition. Amazon ECS gives the first task + /// definition that you registered to a family a revision number of 1. Amazon ECS gives + /// sequential revision numbers to each task definition that you add. public let family: String? /// The Elastic Inference accelerator that's associated with the task. public let inferenceAccelerators: [InferenceAccelerator]? /// The IPC resource namespace to use for the containers in the task. The valid values are host, task, or none. If host is specified, then all containers within the tasks that specified the host IPC mode on the same container instance share the same IPC resources with the host Amazon EC2 instance. If task is specified, all containers within the specified task share the same IPC resources. If none is specified, then IPC resources within the containers of a task are private and not shared with other containers in a task or on the container instance. If no value is specified, then the IPC resource namespace sharing depends on the Docker daemon setting on the container instance. If the host IPC mode is used, be aware that there is a heightened risk of undesired IPC namespace expose. If you are setting namespaced kernel parameters using systemControls for the containers in the task, the following will apply to your IPC resource namespace. 
For more information, see System Controls in the Amazon Elastic Container Service Developer Guide. For tasks that use the host IPC mode, IPC namespace related systemControls are not supported. For tasks that use the task IPC mode, IPC namespace related systemControls will apply to all containers within a task. This parameter is not supported for Windows containers or tasks run on Fargate. public let ipcMode: IpcMode? - /// The amount (in MiB) of memory used by the task. If your tasks runs on Amazon EC2 instances, you must specify either a task-level memory value or a - /// container-level memory value. This field is optional and any value can be used. If a task-level memory - /// value is specified, the container-level memory value is optional. For more information regarding - /// container-level memory and memory reservation, see ContainerDefinition. If your tasks runs on Fargate, this field is required. You must use one of the following values. - /// The value you choose determines your range of valid values for the cpu parameter. 512 (0.5 GB), 1024 (1 GB), 2048 (2 GB) - Available cpu values: 256 (.25 vCPU) 1024 (1 GB), 2048 (2 GB), 3072 (3 GB), 4096 (4 GB) - Available cpu values: 512 (.5 vCPU) 2048 (2 GB), 3072 (3 GB), 4096 (4 GB), 5120 (5 GB), 6144 (6 GB), 7168 (7 GB), 8192 (8 GB) - Available cpu values: 1024 (1 vCPU) Between 4096 (4 GB) and 16384 (16 GB) in increments of 1024 (1 GB) - Available cpu values: 2048 (2 vCPU) Between 8192 (8 GB) and 30720 (30 GB) in increments of 1024 (1 GB) - Available cpu values: 4096 (4 vCPU) Between 16 GB and 60 GB in 4 GB increments - Available cpu values: 8192 (8 vCPU) This option requires Linux platform 1.4.0 or later. Between 32GB and 120 GB in 8 GB increments - Available cpu values: 16384 (16 vCPU) This option requires Linux platform 1.4.0 or later. + /// The amount (in MiB) of memory used by the task. 
If your tasks runs on Amazon EC2 instances, you must specify either a task-level memory + /// value or a container-level memory value. This field is optional and any value can be + /// used. If a task-level memory value is specified, the container-level memory value is + /// optional. For more information regarding container-level memory and memory reservation, + /// see ContainerDefinition. If your tasks runs on Fargate, this field is required. You must use one of the + /// following values. The value you choose determines your range of valid values for the + /// cpu parameter. 512 (0.5 GB), 1024 (1 GB), 2048 (2 GB) - Available cpu values: 256 (.25 vCPU) 1024 (1 GB), 2048 (2 GB), 3072 (3 GB), 4096 (4 GB) - Available cpu values: 512 (.5 vCPU) 2048 (2 GB), 3072 (3 GB), 4096 (4 GB), 5120 (5 GB), 6144 (6 GB), 7168 (7 GB), 8192 (8 GB) - Available cpu values: 1024 (1 vCPU) Between 4096 (4 GB) and 16384 (16 GB) in increments of 1024 (1 GB) - Available cpu values: 2048 (2 vCPU) Between 8192 (8 GB) and 30720 (30 GB) in increments of 1024 (1 GB) - Available cpu values: 4096 (4 vCPU) Between 16 GB and 60 GB in 4 GB increments - Available cpu values: 8192 (8 vCPU) This option requires Linux platform 1.4.0 or later. Between 32GB and 120 GB in 8 GB increments - Available cpu values: 16384 (16 vCPU) This option requires Linux platform 1.4.0 or later. public let memory: String? /// The Docker networking mode to use for the containers in the task. The valid values are none, bridge, awsvpc, and host. If no network mode is specified, the default is bridge. For Amazon ECS tasks on Fargate, the awsvpc network mode is required. For Amazon ECS tasks on Amazon EC2 Linux instances, any network mode can be used. For Amazon ECS tasks on Amazon EC2 Windows instances, or awsvpc can be used. If the network mode is set to none, you cannot specify port mappings in your container definitions, and the tasks containers do not have external connectivity. 
The host and awsvpc network modes offer the highest networking performance for containers because they use the EC2 network stack instead of the virtualized network stack provided by the bridge mode. With the host and awsvpc network modes, exposed container ports are mapped directly to the corresponding host port (for the host network mode) or the attached elastic network interface port (for the awsvpc network mode), so you cannot take advantage of dynamic host port mappings. When using the host network mode, you should not run containers using the root user (UID 0). It is considered best practice to use a non-root user. If the network mode is awsvpc, the task is allocated an elastic network interface, and you must specify a NetworkConfiguration value when you create a service or run a task with the task definition. For more information, see Task Networking in the Amazon Elastic Container Service Developer Guide. If the network mode is host, you cannot run multiple instantiations of the same task on a single container instance when port mappings are used. public let networkMode: NetworkMode? @@ -7568,54 +7947,58 @@ extension ECS { public let pidMode: PidMode? /// An array of placement constraint objects to use for tasks. This parameter isn't supported for tasks run on Fargate. public let placementConstraints: [TaskDefinitionPlacementConstraint]? - /// The configuration details for the App Mesh proxy. Your Amazon ECS container instances require at least version 1.26.0 of the container agent and at least - /// version 1.26.0-1 of the ecs-init package to use a proxy configuration. If your container - /// instances are launched from the Amazon ECS optimized AMI version 20190301 or later, they - /// contain the required versions of the container agent and ecs-init. For more information, - /// see Amazon ECS-optimized Linux AMI in the Amazon Elastic Container Service Developer Guide. + /// The configuration details for the App Mesh proxy. 
Your Amazon ECS container instances require at least version 1.26.0 of the container agent + /// and at least version 1.26.0-1 of the ecs-init package to use a proxy + /// configuration. If your container instances are launched from the Amazon ECS optimized AMI + /// version 20190301 or later, they contain the required versions of the + /// container agent and ecs-init. For more information, see Amazon ECS-optimized Linux AMI in the Amazon Elastic Container Service Developer Guide. public let proxyConfiguration: ProxyConfiguration? /// The Unix timestamp for the time when the task definition was registered. public let registeredAt: Date? /// The principal that registered the task definition. public let registeredBy: String? - /// The container instance attributes required by your task. When an Amazon EC2 instance is registered to your - /// cluster, the Amazon ECS container agent assigns some standard attributes to the instance. You can apply - /// custom attributes. These are specified as key-value pairs using the Amazon ECS console or the PutAttributes API. These attributes are used when determining task placement for tasks - /// hosted on Amazon EC2 instances. For more information, see Attributes - /// in the Amazon Elastic Container Service Developer Guide. This parameter isn't supported for tasks run on Fargate. + /// The container instance attributes required by your task. When an Amazon EC2 instance is + /// registered to your cluster, the Amazon ECS container agent assigns some standard attributes + /// to the instance. You can apply custom attributes. These are specified as key-value pairs + /// using the Amazon ECS console or the PutAttributes + /// API. These attributes are used when determining task placement for tasks hosted on Amazon EC2 + /// instances. For more information, see Attributes in the Amazon Elastic Container Service Developer Guide. This parameter isn't supported for tasks run on Fargate. public let requiresAttributes: [Attribute]? 
/// The task launch types the task definition was validated against. The valid values are - /// EC2, FARGATE, and EXTERNAL. For more information, see Amazon ECS launch - /// types in the Amazon Elastic Container Service Developer Guide. + /// EC2, FARGATE, and EXTERNAL. For more + /// information, see Amazon ECS launch types + /// in the Amazon Elastic Container Service Developer Guide. public let requiresCompatibilities: [Compatibility]? - /// The revision of the task in a particular family. The revision is a version number of a task - /// definition in a family. When you register a task definition for the first time, the revision is - /// 1. Each time that you register a new revision of a task definition in the same family, - /// the revision value always increases by one. This is even if you deregistered previous revisions in this - /// family. + /// The revision of the task in a particular family. The revision is a version number of a + /// task definition in a family. When you register a task definition for the first time, the + /// revision is 1. Each time that you register a new revision of a task + /// definition in the same family, the revision value always increases by one. This is even + /// if you deregistered previous revisions in this family. public let revision: Int? - /// The operating system that your task definitions are running on. A platform family is specified only - /// for tasks using the Fargate launch type. When you specify a task in a service, this value must match the runtimePlatform value of - /// the service. + /// The operating system that your task definitions are running on. A platform family is + /// specified only for tasks using the Fargate launch type. When you specify a task in a service, this value must match the + /// runtimePlatform value of the service. public let runtimePlatform: RuntimePlatform? /// The status of the task definition. public let status: TaskDefinitionStatus? 
/// The full Amazon Resource Name (ARN) of the task definition. public let taskDefinitionArn: String? - /// The short name or full Amazon Resource Name (ARN) of the Identity and Access Management role that grants containers in the task permission - /// to call Amazon Web Services APIs on your behalf. For informationabout the required IAM roles for Amazon ECS, see IAM roles for Amazon ECS in the Amazon Elastic Container Service Developer Guide. + /// The short name or full Amazon Resource Name (ARN) of the Identity and Access Management role that grants containers in the + /// task permission to call Amazon Web Services APIs on your behalf. For information about the required + /// IAM roles for Amazon ECS, see IAM + /// roles for Amazon ECS in the Amazon Elastic Container Service Developer Guide. public let taskRoleArn: String? - /// The list of data volume definitions for the task. For more information, see Using data - /// volumes in tasks in the Amazon Elastic Container Service Developer Guide. The host and sourcePath parameters aren't supported for tasks run on - /// Fargate. + /// The list of data volume definitions for the task. For more information, see Using data volumes in tasks in the Amazon Elastic Container Service Developer Guide. The host and sourcePath parameters aren't supported for + /// tasks run on Fargate. public let volumes: [Volume]? @inlinable - public init(compatibilities: [Compatibility]? = nil, containerDefinitions: [ContainerDefinition]? = nil, cpu: String? = nil, deregisteredAt: Date? = nil, ephemeralStorage: EphemeralStorage? = nil, executionRoleArn: String? = nil, family: String? = nil, inferenceAccelerators: [InferenceAccelerator]? = nil, ipcMode: IpcMode? = nil, memory: String? = nil, networkMode: NetworkMode? = nil, pidMode: PidMode? = nil, placementConstraints: [TaskDefinitionPlacementConstraint]? = nil, proxyConfiguration: ProxyConfiguration? = nil, registeredAt: Date? = nil, registeredBy: String?
= nil, requiresAttributes: [Attribute]? = nil, requiresCompatibilities: [Compatibility]? = nil, revision: Int? = nil, runtimePlatform: RuntimePlatform? = nil, status: TaskDefinitionStatus? = nil, taskDefinitionArn: String? = nil, taskRoleArn: String? = nil, volumes: [Volume]? = nil) { + public init(compatibilities: [Compatibility]? = nil, containerDefinitions: [ContainerDefinition]? = nil, cpu: String? = nil, deregisteredAt: Date? = nil, enableFaultInjection: Bool? = nil, ephemeralStorage: EphemeralStorage? = nil, executionRoleArn: String? = nil, family: String? = nil, inferenceAccelerators: [InferenceAccelerator]? = nil, ipcMode: IpcMode? = nil, memory: String? = nil, networkMode: NetworkMode? = nil, pidMode: PidMode? = nil, placementConstraints: [TaskDefinitionPlacementConstraint]? = nil, proxyConfiguration: ProxyConfiguration? = nil, registeredAt: Date? = nil, registeredBy: String? = nil, requiresAttributes: [Attribute]? = nil, requiresCompatibilities: [Compatibility]? = nil, revision: Int? = nil, runtimePlatform: RuntimePlatform? = nil, status: TaskDefinitionStatus? = nil, taskDefinitionArn: String? = nil, taskRoleArn: String? = nil, volumes: [Volume]? = nil) { self.compatibilities = compatibilities self.containerDefinitions = containerDefinitions self.cpu = cpu self.deregisteredAt = deregisteredAt + self.enableFaultInjection = enableFaultInjection self.ephemeralStorage = ephemeralStorage self.executionRoleArn = executionRoleArn self.family = family @@ -7643,6 +8026,7 @@ extension ECS { case containerDefinitions = "containerDefinitions" case cpu = "cpu" case deregisteredAt = "deregisteredAt" + case enableFaultInjection = "enableFaultInjection" case ephemeralStorage = "ephemeralStorage" case executionRoleArn = "executionRoleArn" case family = "family" @@ -7667,11 +8051,11 @@ extension ECS { } public struct TaskDefinitionPlacementConstraint: AWSEncodableShape & AWSDecodableShape { - /// A cluster query language expression to apply to the constraint. 
For more information, see Cluster - /// query language in the Amazon Elastic Container Service Developer Guide. + /// A cluster query language expression to apply to the constraint. For more information, + /// see Cluster query language in the Amazon Elastic Container Service Developer Guide. public let expression: String? - /// The type of constraint. The MemberOf constraint restricts selection to be from a group - /// of valid candidates. + /// The type of constraint. The MemberOf constraint restricts selection to be + /// from a group of valid candidates. public let type: TaskDefinitionPlacementConstraintType? @inlinable @@ -7687,10 +8071,12 @@ extension ECS { } public struct TaskEphemeralStorage: AWSDecodableShape { - /// Specify an Key Management Service key ID to encrypt the ephemeral storage for the task. + /// Specify a Key Management Service key ID to encrypt the ephemeral storage for the + /// task. public let kmsKeyId: String? - /// The total amount, in GiB, of the ephemeral storage to set for the task. The minimum supported value - /// is 20 GiB and the maximum supported value is
 200 GiB. + /// The total amount, in GiB, of the ephemeral storage to set for the task. The minimum + /// supported value is 20 GiB and the maximum supported value is + /// 200 GiB. public let sizeInGiB: Int? @inlinable @@ -7706,59 +8092,64 @@ extension ECS { } public struct TaskManagedEBSVolumeConfiguration: AWSEncodableShape { - /// Indicates whether the volume should be encrypted. If no value is specified, encryption is turned on - /// by default. This parameter maps 1:1 with the Encrypted parameter of the CreateVolume - /// API in the Amazon EC2 API Reference. + /// Indicates whether the volume should be encrypted. If no value is specified, encryption + /// is turned on by default. This parameter maps 1:1 with the Encrypted + /// parameter of the CreateVolume API in + /// the Amazon EC2 API Reference. public let encrypted: Bool? - /// The Linux filesystem type for the volume. For volumes created from a snapshot, you must specify the - /// same filesystem type that the volume was using when the snapshot was created. If there is a filesystem - /// type mismatch, the task will fail to start. The available filesystem types are
 ext3, ext4, and xfs. If no - /// value is specified, the xfs filesystem type is used by default. + /// The Linux filesystem type for the volume. For volumes created from a snapshot, you + /// must specify the same filesystem type that the volume was using when the snapshot was + /// created. If there is a filesystem type mismatch, the task will fail to start. The available filesystem types are
 ext3, ext4, and + /// xfs. If no value is specified, the xfs filesystem type is + /// used by default. public let filesystemType: TaskFilesystemType? - /// The number of I/O operations per second (IOPS). For gp3, io1, and - /// io2 volumes, this represents the number of IOPS that are provisioned for the volume. - /// For gp2 volumes, this represents the baseline performance of the volume and the rate at - /// which the volume accumulates I/O credits for bursting. The following are the supported values for each volume type. gp3: 3,000 - 16,000 IOPS io1: 100 - 64,000 IOPS io2: 100 - 256,000 IOPS This parameter is required for io1 and io2 volume types. The default for - /// gp3 volumes is 3,000 IOPS. This parameter is not supported for - /// st1, sc1, or standard volume types. This parameter maps 1:1 with the Iops parameter of the CreateVolume API in the - /// Amazon EC2 API Reference. + /// The number of I/O operations per second (IOPS). For gp3, + /// io1, and io2 volumes, this represents the number of IOPS that + /// are provisioned for the volume. For gp2 volumes, this represents the + /// baseline performance of the volume and the rate at which the volume accumulates I/O + /// credits for bursting. The following are the supported values for each volume type. gp3: 3,000 - 16,000 IOPS io1: 100 - 64,000 IOPS io2: 100 - 256,000 IOPS This parameter is required for io1 and io2 volume types. The + /// default for gp3 volumes is 3,000 IOPS. This parameter is not + /// supported for st1, sc1, or standard volume + /// types. This parameter maps 1:1 with the Iops parameter of the CreateVolume API in the Amazon EC2 API Reference. public let iops: Int? - /// The Amazon Resource Name (ARN) identifier of the Amazon Web Services Key Management Service key to use for Amazon EBS encryption. When encryption is turned - /// on and no Amazon Web Services Key Management Service key is specified, the default Amazon Web Services managed key for Amazon EBS volumes is used. 
This - /// parameter maps 1:1 with the KmsKeyId parameter of the CreateVolume API in the - /// Amazon EC2 API Reference. Amazon Web Services authenticates the Amazon Web Services Key Management Service key asynchronously. Therefore, if you specify an ID, alias, or - /// ARN that is invalid, the action can appear to complete, but eventually fails. + /// The Amazon Resource Name (ARN) identifier of the Amazon Web Services Key Management Service key to use for Amazon EBS encryption. When + /// encryption is turned on and no Amazon Web Services Key Management Service key is specified, the default Amazon Web Services managed key + /// for Amazon EBS volumes is used. This parameter maps 1:1 with the KmsKeyId + /// parameter of the CreateVolume API in + /// the Amazon EC2 API Reference. Amazon Web Services authenticates the Amazon Web Services Key Management Service key asynchronously. Therefore, if you specify an + /// ID, alias, or ARN that is invalid, the action can appear to complete, but + /// eventually fails. public let kmsKeyId: String? - /// The ARN of the IAM role to associate with this volume. This is the Amazon ECS infrastructure IAM role - /// that is used to manage your Amazon Web Services infrastructure. We recommend using the Amazon ECS-managed - /// AmazonECSInfrastructureRolePolicyForVolumes IAM policy with this role. For more - /// information, see Amazon ECS infrastructure IAM - /// role in the Amazon ECS Developer Guide. + /// The ARN of the IAM role to associate with this volume. This is the Amazon ECS + /// infrastructure IAM role that is used to manage your Amazon Web Services infrastructure. We recommend + /// using the Amazon ECS-managed AmazonECSInfrastructureRolePolicyForVolumes IAM + /// policy with this role. For more information, see Amazon ECS + /// infrastructure IAM role in the Amazon ECS Developer + /// Guide. public let roleArn: String - /// The size of the volume in GiB. You must specify either a volume size or a snapshot ID. 
If you specify - /// a snapshot ID, the snapshot size is used for the volume size by default. You can optionally specify a - /// volume size greater than or equal to the snapshot size. This parameter maps 1:1 with the - /// Size parameter of the CreateVolume API in the - /// Amazon EC2 API Reference. The following are the supported volume size values for each volume type. gp2 and gp3: 1-16,384 io1 and io2: 4-16,384 st1 and sc1: 125-16,384 standard: 1-1,024 + /// The size of the volume in GiB. You must specify either a volume size or a snapshot ID. + /// If you specify a snapshot ID, the snapshot size is used for the volume size by default. + /// You can optionally specify a volume size greater than or equal to the snapshot size. + /// This parameter maps 1:1 with the Size parameter of the CreateVolume API in the Amazon EC2 API Reference. The following are the supported volume size values for each volume type. gp2 and gp3: 1-16,384 io1 and io2: 4-16,384 st1 and sc1: 125-16,384 standard: 1-1,024 public let sizeInGiB: Int? - /// The snapshot that Amazon ECS uses to create the volume. You must specify either a snapshot ID or a volume - /// size. This parameter maps 1:1 with the SnapshotId parameter of the CreateVolume - /// API in the Amazon EC2 API Reference. + /// The snapshot that Amazon ECS uses to create the volume. You must specify either a snapshot + /// ID or a volume size. This parameter maps 1:1 with the SnapshotId parameter + /// of the CreateVolume API in + /// the Amazon EC2 API Reference. public let snapshotId: String? - /// The tags to apply to the volume. Amazon ECS applies service-managed tags by default. This parameter maps - /// 1:1 with the TagSpecifications.N parameter of the CreateVolume API in the - /// Amazon EC2 API Reference. + /// The tags to apply to the volume. Amazon ECS applies service-managed tags by default. This + /// parameter maps 1:1 with the TagSpecifications.N parameter of the CreateVolume API in the Amazon EC2 API Reference. 
public let tagSpecifications: [EBSTagSpecification]? - /// The termination policy for the volume when the task exits. This provides a way to control whether - /// Amazon ECS terminates the Amazon EBS volume when the task stops. + /// The termination policy for the volume when the task exits. This provides a way to + /// control whether Amazon ECS terminates the Amazon EBS volume when the task stops. public let terminationPolicy: TaskManagedEBSVolumeTerminationPolicy? - /// The throughput to provision for a volume, in MiB/s, with a maximum of 1,000 MiB/s. This parameter - /// maps 1:1 with the Throughput parameter of the CreateVolume API in the - /// Amazon EC2 API Reference. This parameter is only supported for the gp3 volume type. + /// The throughput to provision for a volume, in MiB/s, with a maximum of 1,000 MiB/s. + /// This parameter maps 1:1 with the Throughput parameter of the CreateVolume API in the Amazon EC2 API Reference. This parameter is only supported for the gp3 volume type. public let throughput: Int? - /// The volume type. This parameter maps 1:1 with the VolumeType parameter of the CreateVolume - /// API in the Amazon EC2 API Reference. For more information, see Amazon EBS volume types - /// in the Amazon EC2 User Guide. The following are the supported volume types. General Purpose SSD: gp2|gp3 Provisioned IOPS SSD: io1|io2 Throughput Optimized HDD: st1 Cold HDD: sc1 Magnetic: standard The magnetic volume type is not supported on Fargate. + /// The volume type. This parameter maps 1:1 with the VolumeType parameter of + /// the CreateVolume API in the Amazon EC2 API Reference. For more + /// information, see Amazon EBS volume types in + /// the Amazon EC2 User Guide. The following are the supported volume types. General Purpose SSD: gp2|gp3 Provisioned IOPS SSD: io1|io2 Throughput Optimized HDD: st1 Cold HDD: sc1 Magnetic: standard The magnetic volume type is not supported on Fargate. public let volumeType: String? 
@inlinable @@ -7799,9 +8190,10 @@ extension ECS { public struct TaskManagedEBSVolumeTerminationPolicy: AWSEncodableShape { /// Indicates whether the volume should be deleted on when the task stops. If a value of - /// true is specified, 
Amazon ECS deletes the Amazon EBS volume on your behalf when the task goes - /// into the STOPPED state. If no value is specified, the 
default value is true - /// is used. When set to false, Amazon ECS leaves the volume in your 
account. + /// true is specified, 
Amazon ECS deletes the Amazon EBS volume on your behalf when + /// the task goes into the STOPPED state. If no value is specified, the + /// 
default value is true is used. When set to false, Amazon ECS + /// leaves the volume in your 
account. public let deleteOnTermination: Bool @inlinable @@ -7819,19 +8211,21 @@ extension ECS { public let containerOverrides: [ContainerOverride]? /// The CPU override for the task. public let cpu: String? - /// The ephemeral storage setting override for the task. This parameter is only supported for tasks hosted on Fargate that use the following - /// platform versions: Linux platform version 1.4.0 or later. Windows platform version 1.0.0 or later. + /// The ephemeral storage setting override for the task. This parameter is only supported for tasks hosted on Fargate that + /// use the following platform versions: Linux platform version 1.4.0 or later. Windows platform version 1.0.0 or later. public let ephemeralStorage: EphemeralStorage? - /// The Amazon Resource Name (ARN) of the task execution role override for the task. For more information, see Amazon ECS task + /// The Amazon Resource Name (ARN) of the task execution role override for the task. For more information, + /// see Amazon ECS task /// execution IAM role in the Amazon Elastic Container Service Developer Guide. public let executionRoleArn: String? /// The Elastic Inference accelerator override for the task. public let inferenceAcceleratorOverrides: [InferenceAcceleratorOverride]? /// The memory override for the task. public let memory: String? - /// The Amazon Resource Name (ARN) of the role that containers in this task can assume. All containers in this task are - /// granted the permissions that are specified in this role. For more information, see IAM Role for - /// Tasks in the Amazon Elastic Container Service Developer Guide. + /// The Amazon Resource Name (ARN) of the role that containers in this task can assume. All containers in + /// this task are granted the permissions that are specified in this role. For more + /// information, see IAM Role for Tasks + /// in the Amazon Elastic Container Service Developer Guide. public let taskRoleArn: String? 
@inlinable @@ -7859,70 +8253,77 @@ extension ECS { public struct TaskSet: AWSDecodableShape { /// The capacity provider strategy that are associated with the task set. public let capacityProviderStrategy: [CapacityProviderStrategyItem]? - /// The Amazon Resource Name (ARN) of the cluster that the service that hosts the task set exists in. + /// The Amazon Resource Name (ARN) of the cluster that the service that hosts the task set exists + /// in. public let clusterArn: String? - /// The computed desired count for the task set. This is calculated by multiplying the service's - /// desiredCount by the task set's scale percentage. The result is always - /// rounded up. For example, if the computed desired count is 1.2, it rounds up to 2 tasks. + /// The computed desired count for the task set. This is calculated by multiplying the + /// service's desiredCount by the task set's scale percentage. The + /// result is always rounded up. For example, if the computed desired count is 1.2, it + /// rounds up to 2 tasks. public let computedDesiredCount: Int? /// The Unix timestamp for the time when the task set was created. public let createdAt: Date? - /// The external ID associated with the task set. If an CodeDeploy deployment created a task set, the externalId parameter contains the CodeDeploy - /// deployment ID. If a task set is created for an external deployment and is associated with a service discovery - /// registry, the externalId parameter contains the ECS_TASK_SET_EXTERNAL_ID - /// Cloud Map attribute. + /// The external ID associated with the task set. If an CodeDeploy deployment created a task set, the externalId parameter + /// contains the CodeDeploy deployment ID. If a task set is created for an external deployment and is associated with a service + /// discovery registry, the externalId parameter contains the + /// ECS_TASK_SET_EXTERNAL_ID Cloud Map attribute. public let externalId: String? /// The Fargate ephemeral storage settings for the task set. 
public let fargateEphemeralStorage: DeploymentEphemeralStorage? /// The ID of the task set. public let id: String? - /// The launch type the tasks in the task set are using. For more information, see Amazon ECS launch - /// types in the Amazon Elastic Container Service Developer Guide. + /// The launch type the tasks in the task set are using. For more information, see Amazon ECS + /// launch types in the Amazon Elastic Container Service Developer Guide. public let launchType: LaunchType? /// Details on a load balancer that are used with a task set. public let loadBalancers: [LoadBalancer]? /// The network configuration for the task set. public let networkConfiguration: NetworkConfiguration? - /// The number of tasks in the task set that are in the PENDING status during a deployment. - /// A task in the PENDING state is preparing to enter the RUNNING state. A task - /// set enters the PENDING status when it launches for the first time or when it's restarted - /// after being in the STOPPED state. + /// The number of tasks in the task set that are in the PENDING status during + /// a deployment. A task in the PENDING state is preparing to enter the + /// RUNNING state. A task set enters the PENDING status when + /// it launches for the first time or when it's restarted after being in the + /// STOPPED state. public let pendingCount: Int? - /// The operating system that your tasks in the set are running on. A platform family is specified only - /// for tasks that use the Fargate launch type. All tasks in the set must have the same value. + /// The operating system that your tasks in the set are running on. A platform family is + /// specified only for tasks that use the Fargate launch type. All tasks in the set must have the same value. public let platformFamily: String? - /// The Fargate platform version where the tasks in the task set are running. A platform version is - /// only specified for tasks run on Fargate. 
For more information, see Fargate platform versions in - /// the Amazon Elastic Container Service Developer Guide. + /// The Fargate platform version where the tasks in the task set are running. A platform + /// version is only specified for tasks run on Fargate. For more information, see Fargate platform versions in the Amazon Elastic Container Service Developer Guide. public let platformVersion: String? - /// The number of tasks in the task set that are in the RUNNING status during a deployment. - /// A task in the RUNNING state is running and ready for use. + /// The number of tasks in the task set that are in the RUNNING status during + /// a deployment. A task in the RUNNING state is running and ready for + /// use. public let runningCount: Int? - /// A floating-point percentage of your desired number of tasks to place and keep running in the task - /// set. + /// A floating-point percentage of your desired number of tasks to place and keep running + /// in the task set. public let scale: Scale? /// The Amazon Resource Name (ARN) of the service the task set exists in. public let serviceArn: String? - /// The details for the service discovery registries to assign to this task set. For more information, - /// see Service + /// The details for the service discovery registries to assign to this task set. For more + /// information, see Service /// discovery. public let serviceRegistries: [ServiceRegistry]? - /// The stability status. This indicates whether the task set has reached a steady state. If the - /// following conditions are met, the task set are in STEADY_STATE: The task runningCount is equal to the computedDesiredCount. The pendingCount is 0. There are no tasks that are running on container instances in the DRAINING - /// status. All tasks are reporting a healthy status from the load balancers, service discovery, and - /// container health checks. If any of those conditions aren't met, the stability status returns STABILIZING. 
+ /// The stability status. This indicates whether the task set has reached a steady state. + /// If the following conditions are met, the task set are in + /// STEADY_STATE: The task runningCount is equal to the + /// computedDesiredCount. The pendingCount is 0. There are no tasks that are running on container instances in the + /// DRAINING status. All tasks are reporting a healthy status from the load balancers, service + /// discovery, and container health checks. If any of those conditions aren't met, the stability status returns + /// STABILIZING. public let stabilityStatus: StabilityStatus? - /// The Unix timestamp for the time when the task set stability status was retrieved. + /// The Unix timestamp for the time when the task set stability status was + /// retrieved. public let stabilityStatusAt: Date? - /// The tag specified when a task set is started. If an CodeDeploy deployment created the task set, the - /// startedBy parameter is CODE_DEPLOY. If an external deployment created the - /// task set, the startedBy field isn't used. + /// The tag specified when a task set is started. If an CodeDeploy deployment created the task + /// set, the startedBy parameter is CODE_DEPLOY. If an external + /// deployment created the task set, the startedBy field isn't used. public let startedBy: String? - /// The status of the task set. The following describes each state. PRIMARY The task set is serving production traffic. ACTIVE The task set isn't serving production traffic. DRAINING The tasks in the task set are being stopped, and their corresponding targets are being - /// deregistered from their target group. + /// The status of the task set. The following describes each state. PRIMARY The task set is serving production traffic. ACTIVE The task set isn't serving production traffic. DRAINING The tasks in the task set are being stopped, and their corresponding + /// targets are being deregistered from their target group. public let status: String? 
- /// The metadata that you apply to the task set to help you categorize and organize them. Each tag - /// consists of a key and an optional value. You define both. The following basic restrictions apply to tags: Maximum number of tags per resource - 50 For each resource, each tag key must be unique, and each tag key can have only one value. Maximum key length - 128 Unicode characters in UTF-8 Maximum value length - 256 Unicode characters in UTF-8 If your tagging schema is used across multiple services and resources, remember that other services may have restrictions on allowed characters. Generally allowed characters are: letters, numbers, and spaces representable in UTF-8, and the following characters: + - = . _ : / @. Tag keys and values are case-sensitive. Do not use aws:, AWS:, or any upper or lowercase combination of such as a prefix for either keys or values as it is reserved for Amazon Web Services use. You cannot edit or delete tag keys or values with this prefix. Tags with this prefix do not count against your tags per resource limit. + /// The metadata that you apply to the task set to help you categorize and organize them. + /// Each tag consists of a key and an optional value. You define both. The following basic restrictions apply to tags: Maximum number of tags per resource - 50 For each resource, each tag key must be unique, and each tag key can have only one value. Maximum key length - 128 Unicode characters in UTF-8 Maximum value length - 256 Unicode characters in UTF-8 If your tagging schema is used across multiple services and resources, remember that other services may have restrictions on allowed characters. Generally allowed characters are: letters, numbers, and spaces representable in UTF-8, and the following characters: + - = . _ : / @. Tag keys and values are case-sensitive. Do not use aws:, AWS:, or any upper or lowercase combination of such as a prefix for either keys or values as it is reserved for Amazon Web Services use. 
You cannot edit or delete tag keys or values with this prefix. Tags with this prefix do not count against your tags per resource limit. public let tags: [Tag]? /// The task definition that the task set is using. public let taskDefinition: String? @@ -7990,12 +8391,13 @@ extension ECS { } public struct TaskVolumeConfiguration: AWSEncodableShape { - /// The configuration for the Amazon EBS volume that Amazon ECS creates and manages on your behalf. These settings - /// are used to create each Amazon EBS volume, with one volume created for each task. The Amazon EBS volumes are - /// visible in your account in the Amazon EC2 console once they are created. + /// The configuration for the Amazon EBS volume that Amazon ECS creates and manages on your behalf. + /// These settings are used to create each Amazon EBS volume, with one volume created for each + /// task. The Amazon EBS volumes are visible in your account in the Amazon EC2 console once they are + /// created. public let managedEBSVolume: TaskManagedEBSVolumeConfiguration? - /// The name of the volume. This value must match the volume name from the Volume object in - /// the task definition. + /// The name of the volume. This value must match the volume name from the + /// Volume object in the task definition. public let name: String @inlinable @@ -8015,14 +8417,15 @@ extension ECS { } public struct TimeoutConfiguration: AWSEncodableShape & AWSDecodableShape { - /// The amount of time in seconds a connection will stay active while idle. A value of 0 can - /// be set to disable idleTimeout. The idleTimeout default for HTTP/HTTP2/GRPC is 5 - /// minutes. The idleTimeout default for TCP is 1 hour. + /// The amount of time in seconds a connection will stay active while idle. A value of + /// 0 can be set to disable idleTimeout. The idleTimeout default for + /// HTTP/HTTP2/GRPC is 5 minutes. The idleTimeout default for TCP is 1 hour. public let idleTimeoutSeconds: Int? 
- /// The amount of time waiting for the upstream to respond with a complete response per request. A value - /// of 0 can be set to disable perRequestTimeout. perRequestTimeout - /// can only be set if Service Connect appProtocol isn't TCP. Only - /// idleTimeout is allowed for TCP appProtocol. + /// The amount of time waiting for the upstream to respond with a complete response per + /// request. A value of 0 can be set to disable perRequestTimeout. + /// perRequestTimeout can only be set if Service Connect + /// appProtocol isn't TCP. Only idleTimeout is + /// allowed for TCP appProtocol. public let perRequestTimeoutSeconds: Int? @inlinable @@ -8047,11 +8450,12 @@ extension ECS { public struct Tmpfs: AWSEncodableShape & AWSDecodableShape { /// The absolute file path where the tmpfs volume is to be mounted. public let containerPath: String - /// The list of tmpfs volume mount options. Valid values: "defaults" | "ro" | "rw" | "suid" | "nosuid" | "dev" | "nodev" | "exec" | - /// "noexec" | "sync" | "async" | "dirsync" | "remount" | "mand" | "nomand" | "atime" | "noatime" | - /// "diratime" | "nodiratime" | "bind" | "rbind" | "unbindable" | "runbindable" | "private" | - /// "rprivate" | "shared" | "rshared" | "slave" | "rslave" | "relatime" | "norelatime" | "strictatime" - /// | "nostrictatime" | "mode" | "uid" | "gid" | "nr_inodes" | "nr_blocks" | "mpol" + /// The list of tmpfs volume mount options. Valid values: "defaults" | "ro" | "rw" | "suid" | "nosuid" | "dev" | "nodev" | + /// "exec" | "noexec" | "sync" | "async" | "dirsync" | "remount" | "mand" | "nomand" | + /// "atime" | "noatime" | "diratime" | "nodiratime" | "bind" | "rbind" | "unbindable" | + /// "runbindable" | "private" | "rprivate" | "shared" | "rshared" | "slave" | "rslave" | + /// "relatime" | "norelatime" | "strictatime" | "nostrictatime" | "mode" | "uid" | "gid" + /// | "nr_inodes" | "nr_blocks" | "mpol" public let mountOptions: [String]? /// The maximum size (in MiB) of the tmpfs volume. 
public let size: Int @@ -8071,13 +8475,15 @@ extension ECS { } public struct Ulimit: AWSEncodableShape & AWSDecodableShape { - /// The hard limit for the ulimit type. The value can be specified in bytes, seconds, or as - /// a count, depending on the type of the ulimit. + /// The hard limit for the ulimit type. The value can be specified in bytes, + /// seconds, or as a count, depending on the type of the + /// ulimit. public let hardLimit: Int /// The type of the ulimit. public let name: UlimitName - /// The soft limit for the ulimit type. The value can be specified in bytes, seconds, or as - /// a count, depending on the type of the ulimit. + /// The soft limit for the ulimit type. The value can be specified in bytes, + /// seconds, or as a count, depending on the type of the + /// ulimit. public let softLimit: Int @inlinable @@ -8095,8 +8501,9 @@ extension ECS { } public struct UntagResourceRequest: AWSEncodableShape { - /// The Amazon Resource Name (ARN) of the resource to delete tags from. Currently, the supported resources are Amazon ECS - /// capacity providers, tasks, services, task definitions, clusters, and container instances. + /// The Amazon Resource Name (ARN) of the resource to delete tags from. Currently, the supported resources + /// are Amazon ECS capacity providers, tasks, services, task definitions, clusters, and container + /// instances. public let resourceArn: String /// The keys of the tags to be removed. public let tagKeys: [String] @@ -8215,12 +8622,13 @@ extension ECS { public struct UpdateClusterSettingsRequest: AWSEncodableShape { /// The name of the cluster to modify the settings for. public let cluster: String - /// The setting to use by default for a cluster. This parameter is used to turn on CloudWatch Container - /// Insights for a cluster. If this value is specified, it overrides the containerInsights - /// value set with PutAccountSetting or PutAccountSettingDefault. 
Currently, if you delete an existing cluster that does not have Container Insights turned on, and - /// then create a new cluster with the same name with Container Insights tuned on, Container Insights - /// will not actually be turned on. If you want to preserve the same name for your existing cluster and - /// turn on Container Insights, you must wait 7 days before you can re-create it. + /// The setting to use by default for a cluster. This parameter is used to turn on CloudWatch + /// Container Insights for a cluster. If this value is specified, it overrides the + /// containerInsights value set with PutAccountSetting or PutAccountSettingDefault. Currently, if you delete an existing cluster that does not have Container Insights + /// turned on, and then create a new cluster with the same name with Container Insights + /// turned on, Container Insights will not actually be turned on. If you want to preserve + /// the same name for your existing cluster and turn on Container Insights, you must + /// wait 7 days before you can re-create it. public let settings: [ClusterSetting] @inlinable @@ -8250,11 +8658,11 @@ extension ECS { } public struct UpdateContainerAgentRequest: AWSEncodableShape { - /// The short name or full Amazon Resource Name (ARN) of the cluster that your container instance is running on. - /// If you do not specify a cluster, the default cluster is assumed. + /// The short name or full Amazon Resource Name (ARN) of the cluster that your container instance is + /// running on. If you do not specify a cluster, the default cluster is assumed. public let cluster: String? - /// The container instance ID or full ARN entries for the container instance where you would like to - /// update the Amazon ECS container agent. + /// The container instance ID or full ARN entries for the container instance where you + /// would like to update the Amazon ECS container agent. 
public let containerInstance: String @inlinable @@ -8284,17 +8692,17 @@ extension ECS { } public struct UpdateContainerInstancesStateRequest: AWSEncodableShape { - /// The short name or full Amazon Resource Name (ARN) of the cluster that hosts the container instance to update. - /// If you do not specify a cluster, the default cluster is assumed. + /// The short name or full Amazon Resource Name (ARN) of the cluster that hosts the container instance to + /// update. If you do not specify a cluster, the default cluster is assumed. public let cluster: String? /// A list of up to 10 container instance IDs or full ARN entries. public let containerInstances: [String] - /// The container instance state to update the container instance with. The only valid values for this - /// action are ACTIVE and DRAINING. A container instance can only be updated to - /// DRAINING status once it has reached an ACTIVE state. If a container - /// instance is in REGISTERING, DEREGISTERING, or - /// REGISTRATION_FAILED state you can describe the container instance but can't update the - /// container instance state. + /// The container instance state to update the container instance with. The only valid + /// values for this action are ACTIVE and DRAINING. A container + /// instance can only be updated to DRAINING status once it has reached an + /// ACTIVE state. If a container instance is in REGISTERING, + /// DEREGISTERING, or REGISTRATION_FAILED state you can + /// describe the container instance but can't update the container instance state. public let status: ContainerInstanceStatus @inlinable @@ -8330,8 +8738,8 @@ extension ECS { } public struct UpdateServicePrimaryTaskSetRequest: AWSEncodableShape { - /// The short name or full Amazon Resource Name (ARN) of the cluster that hosts the service that the task set exists - /// in. + /// The short name or full Amazon Resource Name (ARN) of the cluster that hosts the service that the task + /// set exists in. 
public let cluster: String /// The short name or full Amazon Resource Name (ARN) of the task set to set as the primary task set in the /// deployment. @@ -8371,83 +8779,96 @@ extension ECS { /// Indicates whether to use Availability Zone rebalancing for the service. For more information, see Balancing an Amazon ECS service across Availability Zones in /// the Amazon Elastic Container Service Developer Guide. public let availabilityZoneRebalancing: AvailabilityZoneRebalancing? - /// The capacity provider strategy to update the service to use. if the service uses the default capacity provider strategy for the cluster, the service can be - /// updated to use one or more capacity providers as opposed to the default capacity provider strategy. - /// However, when a service is using a capacity provider strategy that's not the default capacity provider - /// strategy, the service can't be updated to use the cluster's default capacity provider strategy. A capacity provider strategy consists of one or more capacity providers along with the - /// base and weight to assign to them. A capacity provider must be associated - /// with the cluster to be used in a capacity provider strategy. The PutClusterCapacityProviders API is used to associate a capacity provider with a cluster. - /// Only capacity providers with an ACTIVE or UPDATING status can be used. If specifying a capacity provider that uses an Auto Scaling group, the capacity provider must already - /// be created. New capacity providers can be created with the CreateClusterCapacityProvider API operation. To use a Fargate capacity provider, specify either the FARGATE or - /// FARGATE_SPOT capacity providers. The Fargate capacity providers are available to all - /// accounts and only need to be associated with a cluster to be used. The PutClusterCapacityProvidersAPI operation is used to update the list of available capacity - /// providers for a cluster after the cluster is created. 
+ /// The capacity provider strategy to update the service to use. If the service uses the default capacity provider strategy for the cluster, the + /// service can be updated to use one or more capacity providers as opposed to the default + /// capacity provider strategy. However, when a service is using a capacity provider + /// strategy that's not the default capacity provider strategy, the service can't be updated + /// to use the cluster's default capacity provider strategy. A capacity provider strategy consists of one or more capacity providers along with the + /// base and weight to assign to them. A capacity provider + /// must be associated with the cluster to be used in a capacity provider strategy. The + /// PutClusterCapacityProviders API is used to associate a capacity provider + /// with a cluster. Only capacity providers with an ACTIVE or + /// UPDATING status can be used. If specifying a capacity provider that uses an Auto Scaling group, the capacity + /// provider must already be created. New capacity providers can be created with the CreateClusterCapacityProvider API operation. To use a Fargate capacity provider, specify either the FARGATE or + /// FARGATE_SPOT capacity providers. The Fargate capacity providers are + /// available to all accounts and only need to be associated with a cluster to be + /// used. The PutClusterCapacityProviders API operation is used to update the list of + /// available capacity providers for a cluster after the cluster is created. public let capacityProviderStrategy: [CapacityProviderStrategyItem]? /// The short name or full Amazon Resource Name (ARN) of the cluster that your service runs on. /// If you do not specify a cluster, the default cluster is assumed. public let cluster: String? - /// Optional deployment parameters that control how many tasks run during the deployment and the ordering - /// of stopping and starting tasks. 
+ /// Optional deployment parameters that control how many tasks run during the deployment + /// and the ordering of stopping and starting tasks. public let deploymentConfiguration: DeploymentConfiguration? - /// The number of instantiations of the task to place and keep running in your service. + /// The number of instantiations of the task to place and keep running in your + /// service. public let desiredCount: Int? - /// Determines whether to turn on Amazon ECS managed tags for the tasks in the service. For more information, - /// see Tagging Your - /// Amazon ECS Resources in the Amazon Elastic Container Service Developer Guide. Only tasks launched after the update will reflect the update. To update the tags on all tasks, set - /// forceNewDeployment to true, so that Amazon ECS starts new tasks with the - /// updated tags. + /// Determines whether to turn on Amazon ECS managed tags for the tasks in the service. For + /// more information, see Tagging Your Amazon ECS + /// Resources in the Amazon Elastic Container Service Developer Guide. Only tasks launched after the update will reflect the update. To update the tags on + /// all tasks, set forceNewDeployment to true, so that Amazon ECS + /// starts new tasks with the updated tags. public let enableECSManagedTags: Bool? - /// If true, this enables execute command functionality on all task containers. If you do not want to override the value that was set when the service was created, you can set this - /// to null when performing this action. + /// If true, this enables execute command functionality on all task + /// containers. If you do not want to override the value that was set when the service was created, + /// you can set this to null when performing this action. public let enableExecuteCommand: Bool? - /// Determines whether to force a new deployment of the service. By default, deployments aren't forced. - /// You can use this option to start a new deployment with no service definition changes. 
For example, you - /// can update a service's tasks to use a newer Docker image with the same image/tag combination - /// (my_image:latest) or to roll Fargate tasks onto a newer platform version. + /// Determines whether to force a new deployment of the service. By default, deployments + /// aren't forced. You can use this option to start a new deployment with no service + /// definition changes. For example, you can update a service's tasks to use a newer Docker + /// image with the same image/tag combination (my_image:latest) or to roll + /// Fargate tasks onto a newer platform version. public let forceNewDeployment: Bool? - /// The period of time, in seconds, that the Amazon ECS service scheduler ignores unhealthy Elastic Load Balancing, VPC Lattice, and container - /// health checks after a task has first started. If you don't specify a health check grace - /// period value, the default value of 0 is used. If you don't use any of the health checks, - /// then healthCheckGracePeriodSeconds is unused. If your service's tasks take a while to start and respond to health checks, you can specify a - /// health check grace period of up to 2,147,483,647 seconds (about 69 years). During that time, the Amazon ECS - /// service scheduler ignores health check status. This grace period can prevent the service scheduler from - /// marking tasks as unhealthy and stopping them before they have time to come up. + /// The period of time, in seconds, that the Amazon ECS service scheduler ignores unhealthy + /// Elastic Load Balancing, VPC Lattice, and container health checks after a task has first started. If you don't + /// specify a health check grace period value, the default value of 0 is used. + /// If you don't use any of the health checks, then + /// healthCheckGracePeriodSeconds is unused. If your service's tasks take a while to start and respond to health checks, you can + /// specify a health check grace period of up to 2,147,483,647 seconds (about 69 years). 
+ /// During that time, the Amazon ECS service scheduler ignores health check status. This grace + /// period can prevent the service scheduler from marking tasks as unhealthy and stopping + /// them before they have time to come up. public let healthCheckGracePeriodSeconds: Int? - /// A list of Elastic Load Balancing load balancer objects. It contains the load balancer name, the container name, and - /// the container port to access from the load balancer. The container name is as it appears in a container - /// definition. When you add, update, or remove a load balancer configuration, Amazon ECS starts new tasks with the - /// updated Elastic Load Balancing configuration, and then stops the old tasks when the new tasks are running. For services that use rolling updates, you can add, update, or remove Elastic Load Balancing target groups. You can - /// update from a single target group to multiple target groups and from multiple target groups to a single - /// target group. For services that use blue/green deployments, you can update Elastic Load Balancing target groups by using - /// CreateDeployment - /// through CodeDeploy. Note that multiple target groups are not supported for blue/green deployments. For more - /// information see Register multiple target - /// groups with a service in the Amazon Elastic Container Service Developer Guide. For services that use the external deployment controller, you can add, update, or remove load - /// balancers by using CreateTaskSet. Note that - /// multiple target groups are not supported for external deployments. For more information see Register multiple target groups with a service in the Amazon Elastic Container Service Developer Guide. You can remove existing loadBalancers by passing an empty list. + /// A list of Elastic Load Balancing load balancer objects. It contains the load balancer name, the + /// container name, and the container port to access from the load balancer. 
The container + /// name is as it appears in a container definition. When you add, update, or remove a load balancer configuration, Amazon ECS starts new tasks + /// with the updated Elastic Load Balancing configuration, and then stops the old tasks when the new tasks + /// are running. For services that use rolling updates, you can add, update, or remove Elastic Load Balancing target + /// groups. You can update from a single target group to multiple target groups and from + /// multiple target groups to a single target group. For services that use blue/green deployments, you can update Elastic Load Balancing target groups by + /// using CreateDeployment through CodeDeploy. Note that multiple target groups + /// are not supported for blue/green deployments. For more information see Register + /// multiple target groups with a service in the Amazon Elastic Container Service Developer Guide. For services that use the external deployment controller, you can add, update, or + /// remove load balancers by using CreateTaskSet. + /// Note that multiple target groups are not supported for external deployments. For more + /// information see Register + /// multiple target groups with a service in the Amazon Elastic Container Service Developer Guide. You can remove existing loadBalancers by passing an empty list. public let loadBalancers: [LoadBalancer]? /// An object representing the network configuration for the service. public let networkConfiguration: NetworkConfiguration? - /// An array of task placement constraint objects to update the service to use. If no value is specified, - /// the existing placement constraints for the service will remain unchanged. If this value is specified, - /// it will override any existing placement constraints defined for the service. To remove all existing - /// placement constraints, specify an empty array. You can specify a maximum of 10 constraints for each task. 
This limit includes constraints in the - /// task definition and those specified at runtime. + /// An array of task placement constraint objects to update the service to use. If no + /// value is specified, the existing placement constraints for the service will remain + /// unchanged. If this value is specified, it will override any existing placement + /// constraints defined for the service. To remove all existing placement constraints, + /// specify an empty array. You can specify a maximum of 10 constraints for each task. This limit includes + /// constraints in the task definition and those specified at runtime. public let placementConstraints: [PlacementConstraint]? - /// The task placement strategy objects to update the service to use. If no value is specified, the - /// existing placement strategy for the service will remain unchanged. If this value is specified, it will - /// override the existing placement strategy defined for the service. To remove an existing placement - /// strategy, specify an empty object. You can specify a maximum of five strategy rules for each service. + /// The task placement strategy objects to update the service to use. If no value is + /// specified, the existing placement strategy for the service will remain unchanged. If + /// this value is specified, it will override the existing placement strategy defined for + /// the service. To remove an existing placement strategy, specify an empty object. You can specify a maximum of five strategy rules for each service. public let placementStrategy: [PlacementStrategy]? - /// The platform version that your tasks in the service run on. A platform version is only specified for - /// tasks using the Fargate launch type. If a platform version is not specified, the - /// LATEST platform version is used. For more information, see Fargate Platform Versions in - /// the Amazon Elastic Container Service Developer Guide. + /// The platform version that your tasks in the service run on. 
A platform version is only + /// specified for tasks using the Fargate launch type. If a platform version + /// is not specified, the LATEST platform version is used. For more + /// information, see Fargate Platform + /// Versions in the Amazon Elastic Container Service Developer Guide. public let platformVersion: String? - /// Determines whether to propagate the tags from the task definition or the service to the task. If no - /// value is specified, the tags aren't propagated. Only tasks launched after the update will reflect the update. To update the tags on all tasks, set - /// forceNewDeployment to true, so that Amazon ECS starts new tasks with the - /// updated tags. + /// Determines whether to propagate the tags from the task definition or the service to + /// the task. If no value is specified, the tags aren't propagated. Only tasks launched after the update will reflect the update. To update the tags on + /// all tasks, set forceNewDeployment to true, so that Amazon ECS + /// starts new tasks with the updated tags. public let propagateTags: PropagateTags? /// The name of the service to update. public let service: String @@ -8459,24 +8880,27 @@ extension ECS { /// Only the tasks that Amazon ECS services create are supported with Service Connect. /// For more information, see Service Connect in the Amazon Elastic Container Service Developer Guide. public let serviceConnectConfiguration: ServiceConnectConfiguration? - /// The details for the service discovery registries to assign to this service. For more information, see - /// Service - /// Discovery. When you add, update, or remove the service registries configuration, Amazon ECS starts new tasks with the - /// updated service registries configuration, and then stops the old tasks when the new tasks are - /// running. You can remove existing serviceRegistries by passing an empty list. + /// The details for the service discovery registries to assign to this service. 
For more + /// information, see Service + /// Discovery. When you add, update, or remove the service registries configuration, Amazon ECS starts new + /// tasks with the updated service registries configuration, and then stops the old tasks + /// when the new tasks are running. You can remove existing serviceRegistries by passing an empty + /// list. public let serviceRegistries: [ServiceRegistry]? - /// The family and revision (family:revision) or full ARN of the - /// task definition to run in your service. If a revision is not specified, the latest - /// ACTIVE revision is used. If you modify the task definition with - /// UpdateService, Amazon ECS spawns a task with the new version of the task definition and - /// then stops an old task after the new version is running. + /// The family and revision (family:revision) or + /// full ARN of the task definition to run in your service. If a revision is + /// not specified, the latest ACTIVE revision is used. If you modify the task + /// definition with UpdateService, Amazon ECS spawns a task with the new version of + /// the task definition and then stops an old task after the new version is running. public let taskDefinition: String? - /// The details of the volume that was configuredAtLaunch. You can configure the size, - /// volumeType, IOPS, throughput, snapshot and encryption in ServiceManagedEBSVolumeConfiguration. The name of the volume must match the - /// name from the task definition. If set to null, no new deployment is triggered. - /// Otherwise, if this configuration differs from the existing one, it triggers a new deployment. + /// The details of the volume that was configuredAtLaunch. You can configure + /// the size, volumeType, IOPS, throughput, snapshot and encryption in ServiceManagedEBSVolumeConfiguration. The name of the volume + /// must match the name from the task definition. If set to null, no new + /// deployment is triggered. 
Otherwise, if this configuration differs from the existing one, + /// it triggers a new deployment. public let volumeConfigurations: [ServiceVolumeConfiguration]? - /// An object representing the VPC Lattice configuration for the service being updated. + /// An object representing the VPC Lattice configuration for the service being + /// updated. public let vpcLatticeConfigurations: [VpcLatticeConfiguration]? @inlinable @@ -8554,18 +8978,18 @@ extension ECS { } public struct UpdateTaskProtectionRequest: AWSEncodableShape { - /// The short name or full Amazon Resource Name (ARN) of the cluster that hosts the service that the task sets exist - /// in. + /// The short name or full Amazon Resource Name (ARN) of the cluster that hosts the service that the task + /// sets exist in. public let cluster: String - /// If you set protectionEnabled to true, you can specify the duration for task - /// protection in minutes. You can specify a value from 1 minute to up to 2,880 minutes (48 hours). During - /// this time, your task will not be terminated by scale-in events from Service Auto Scaling or - /// deployments. After this time period lapses, protectionEnabled will be reset to - /// false. If you don’t specify the time, then the task is automatically protected for 120 minutes (2 - /// hours). + /// If you set protectionEnabled to true, you can specify the + /// duration for task protection in minutes. You can specify a value from 1 minute to up to + /// 2,880 minutes (48 hours). During this time, your task will not be terminated by scale-in + /// events from Service Auto Scaling or deployments. After this time period lapses, + /// protectionEnabled will be reset to false. If you don’t specify the time, then the task is automatically protected for 120 + /// minutes (2 hours). public let expiresInMinutes: Int? - /// Specify true to mark a task for protection and false to unset protection, - /// making it eligible for termination. 
+ /// Specify true to mark a task for protection and false to + /// unset protection, making it eligible for termination. public let protectionEnabled: Bool /// A list of up to 10 task IDs or full ARN entries. public let tasks: [String] @@ -8589,9 +9013,10 @@ extension ECS { public struct UpdateTaskProtectionResponse: AWSDecodableShape { /// Any failures associated with the call. public let failures: [Failure]? - /// A list of tasks with the following information. taskArn: The task ARN. protectionEnabled: The protection status of the task. If scale-in protection is - /// turned on for a task, the value is true. Otherwise, it is - /// false. expirationDate: The epoch time when protection for the task will expire. + /// A list of tasks with the following information. taskArn: The task ARN. protectionEnabled: The protection status of the task. If scale-in + /// protection is turned on for a task, the value is true. Otherwise, + /// it is false. expirationDate: The epoch time when protection for the task will + /// expire. public let protectedTasks: [ProtectedTask]? @inlinable @@ -8607,11 +9032,11 @@ extension ECS { } public struct UpdateTaskSetRequest: AWSEncodableShape { - /// The short name or full Amazon Resource Name (ARN) of the cluster that hosts the service that the task set is found - /// in. + /// The short name or full Amazon Resource Name (ARN) of the cluster that hosts the service that the task + /// set is found in. public let cluster: String - /// A floating-point percentage of the desired number of tasks to place and keep running in the task - /// set. + /// A floating-point percentage of the desired number of tasks to place and keep running + /// in the task set. public let scale: Scale /// The short name or full Amazon Resource Name (ARN) of the service that the task set is found in. 
public let service: String @@ -8649,8 +9074,8 @@ extension ECS { } public struct VersionInfo: AWSEncodableShape & AWSDecodableShape { - /// The Git commit hash for the Amazon ECS container agent build on the amazon-ecs-agent GitHub - /// repository. + /// The Git commit hash for the Amazon ECS container agent build on the amazon-ecs-agent + /// GitHub repository. public let agentHash: String? /// The version number of the Amazon ECS container agent. public let agentVersion: String? @@ -8672,33 +9097,41 @@ extension ECS { } public struct Volume: AWSEncodableShape & AWSDecodableShape { - /// Indicates whether the volume should be configured at launch time. This is used to create Amazon EBS - /// volumes for standalone tasks or tasks created as part of a service. Each task definition revision may - /// only have one volume configured at launch in the volume configuration. To configure a volume at launch time, use this task definition revision and specify a - /// volumeConfigurations object when calling the CreateService, - /// UpdateService, RunTask or StartTask APIs. + /// Indicates whether the volume should be configured at launch time. This is used to + /// create Amazon EBS volumes for standalone tasks or tasks created as part of a service. Each + /// task definition revision may only have one volume configured at launch in the volume + /// configuration. To configure a volume at launch time, use this task definition revision and specify a + /// volumeConfigurations object when calling the + /// CreateService, UpdateService, RunTask or + /// StartTask APIs. public let configuredAtLaunch: Bool? - /// This parameter is specified when you use Docker volumes. Windows containers only support the use of the local driver. To use bind mounts, specify - /// the host parameter instead. Docker volumes aren't supported by tasks run on Fargate. + /// This parameter is specified when you use Docker volumes. Windows containers only support the use of the local driver. 
To use bind + /// mounts, specify the host parameter instead. Docker volumes aren't supported by tasks run on Fargate. public let dockerVolumeConfiguration: DockerVolumeConfiguration? - /// This parameter is specified when you use an Amazon Elastic File System file system for task storage. + /// This parameter is specified when you use an Amazon Elastic File System file system for task + /// storage. public let efsVolumeConfiguration: EFSVolumeConfiguration? - /// This parameter is specified when you use Amazon FSx for Windows File Server file system for task storage. + /// This parameter is specified when you use Amazon FSx for Windows File Server file system for task + /// storage. public let fsxWindowsFileServerVolumeConfiguration: FSxWindowsFileServerVolumeConfiguration? /// This parameter is specified when you use bind mount host volumes. The contents of the - /// host parameter determine whether your bind mount host volume persists on the host - /// container instance and where it's stored. If the host parameter is empty, then the Docker - /// daemon assigns a host path for your data volume. However, the data isn't guaranteed to persist after - /// the containers that are associated with it stop running. Windows containers can mount whole directories on the same drive as $env:ProgramData. - /// Windows containers can't mount directories on a different drive, and mount point can't be across - /// drives. For example, you can mount C:\my\path:C:\my\path and D:\:D:\, but not + /// host parameter determine whether your bind mount host volume persists + /// on the host container instance and where it's stored. If the host parameter + /// is empty, then the Docker daemon assigns a host path for your data volume. However, the + /// data isn't guaranteed to persist after the containers that are associated with it stop + /// running. Windows containers can mount whole directories on the same drive as + /// $env:ProgramData. 
Windows containers can't mount directories on a + /// different drive, and mount point can't be across drives. For example, you can mount + /// C:\my\path:C:\my\path and D:\:D:\, but not /// D:\my\path:C:\my\path or D:\:C:\my\path. public let host: HostVolumeProperties? - /// The name of the volume. Up to 255 letters (uppercase and lowercase), numbers, underscores, and hyphens are allowed. When using a volume configured at launch, the name is required and must also be - /// specified as the volume name in the ServiceVolumeConfiguration or - /// TaskVolumeConfiguration parameter when creating your service or standalone - /// task. For all other types of volumes, this name is referenced in the sourceVolume parameter of - /// the mountPoints object in the container definition. When a volume is using the efsVolumeConfiguration, the name is required. + /// The name of the volume. Up to 255 letters (uppercase and lowercase), numbers, underscores, and hyphens are allowed. When using a volume configured at launch, the name is required and must + /// also be specified as the volume name in the ServiceVolumeConfiguration or + /// TaskVolumeConfiguration parameter when creating your service or + /// standalone task. For all other types of volumes, this name is referenced in the + /// sourceVolume parameter of the mountPoints object in the + /// container definition. When a volume is using the efsVolumeConfiguration, the name is + /// required. public let name: String? @inlinable @@ -8722,11 +9155,12 @@ extension ECS { } public struct VolumeFrom: AWSEncodableShape & AWSDecodableShape { - /// If this value is true, the container has read-only access to the volume. If this value - /// is false, then the container can write to the volume. The default value is - /// false. + /// If this value is true, the container has read-only access to the volume. + /// If this value is false, then the container can write to the volume. The + /// default value is false. 
public let readOnly: Bool? - /// The name of another container within the same task definition to mount volumes from. + /// The name of another container within the same task definition to mount volumes + /// from. public let sourceContainer: String? @inlinable @@ -8742,11 +9176,12 @@ extension ECS { } public struct VpcLatticeConfiguration: AWSEncodableShape & AWSDecodableShape { - /// The name of the port mapping to register in the VPC Lattice target group. This is the - /// name of the portMapping you defined in your task definition. + /// The name of the port mapping to register in the VPC Lattice target group. This is the name + /// of the portMapping you defined in your task definition. public let portName: String - /// The ARN of the IAM role to associate with this VPC Lattice configuration. This is the Amazon ECS - /// infrastructure IAM role that is used to manage your VPC Lattice infrastructure. + /// The ARN of the IAM role to associate with this VPC Lattice configuration. This is the + /// Amazon ECS
 infrastructure IAM role that is used to manage your VPC Lattice + /// infrastructure. public let roleArn: String /// The full Amazon Resource Name (ARN) of the target group or groups associated with the VPC Lattice /// configuration that the Amazon ECS tasks will be registered to. @@ -8820,48 +9255,55 @@ public struct ECSErrorType: AWSErrorType { /// You don't have authorization to perform the requested action. public static var accessDeniedException: Self { .init(.accessDeniedException) } - /// You can apply up to 10 custom attributes for each resource. You can view the attributes of a resource - /// with ListAttributes. You can remove existing attributes on a resource with DeleteAttributes. + /// You can apply up to 10 custom attributes for each resource. You can view the + /// attributes of a resource with ListAttributes. + /// You can remove existing attributes on a resource with DeleteAttributes. public static var attributeLimitExceededException: Self { .init(.attributeLimitExceededException) } /// Your Amazon Web Services account was blocked. For more information, contact /// Amazon Web Services Support. public static var blockedException: Self { .init(.blockedException) } - /// These errors are usually caused by a client action. This client action might be using an action or - /// resource on behalf of a user that doesn't have permissions to use the action or resource. Or, it might - /// be specifying an identifier that isn't valid. The following list includes additional causes for the error: The RunTask could not be processed because you use managed scaling and there is - /// a capacity error because the quota of tasks in the PROVISIONING per cluster has - /// been reached. For information about the service quotas, see Amazon ECS service - /// quotas. + /// These errors are usually caused by a client action. This client action might be using + /// an action or resource on behalf of a user that doesn't have permissions to use the + /// action or resource. 
Or, it might be specifying an identifier that isn't valid. The following list includes additional causes for the error: The RunTask could not be processed because you use managed + /// scaling and there is a capacity error because the quota of tasks in the + /// PROVISIONING per cluster has been reached. For information + /// about the service quotas, see Amazon ECS + /// service quotas. public static var clientException: Self { .init(.clientException) } - /// You can't delete a cluster that has registered container instances. First, deregister the container - /// instances before you can delete the cluster. For more information, see DeregisterContainerInstance. + /// You can't delete a cluster that has registered container instances. First, deregister + /// the container instances before you can delete the cluster. For more information, see + /// DeregisterContainerInstance. public static var clusterContainsContainerInstancesException: Self { .init(.clusterContainsContainerInstancesException) } - /// You can't delete a cluster that contains services. First, update the service to reduce its desired - /// task count to 0, and then delete the service. For more information, see UpdateService and DeleteService. + /// You can't delete a cluster that contains services. First, update the service to reduce + /// its desired task count to 0, and then delete the service. For more information, see + /// UpdateService and + /// DeleteService. public static var clusterContainsServicesException: Self { .init(.clusterContainsServicesException) } /// You can't delete a cluster that has active tasks. public static var clusterContainsTasksException: Self { .init(.clusterContainsTasksException) } /// The specified cluster wasn't found. You can view your available clusters with ListClusters. Amazon ECS clusters are Region specific. public static var clusterNotFoundException: Self { .init(.clusterNotFoundException) } /// The RunTask request could not be processed due to conflicts. 
The provided - /// clientToken is already in use with a different RunTask request. The - /// resourceIds are the existing task ARNs which are already associated with the - /// clientToken. To fix this issue: Run RunTask with a unique clientToken. Run RunTask with the clientToken and the original set of - /// parameters + /// clientToken is already in use with a different RunTask + /// request. The resourceIds are the existing task ARNs which are already + /// associated with the clientToken. To fix this issue: Run RunTask with a unique clientToken. Run RunTask with the clientToken and the original + /// set of parameters public static var conflictException: Self { .init(.conflictException) } - /// The specified parameter isn't valid. Review the available parameters for the API request. + /// The specified parameter isn't valid. Review the available parameters for the API + /// request. public static var invalidParameterException: Self { .init(.invalidParameterException) } /// The limit for the resource was exceeded. public static var limitExceededException: Self { .init(.limitExceededException) } - /// Amazon ECS can't determine the current version of the Amazon ECS container agent on the container instance and - /// doesn't have enough information to proceed with an update. This could be because the agent running on - /// the container instance is a previous or custom version that doesn't use our version information. + /// Amazon ECS can't determine the current version of the Amazon ECS container agent on the + /// container instance and doesn't have enough information to proceed with an update. This + /// could be because the agent running on the container instance is a previous or custom + /// version that doesn't use our version information. public static var missingVersionException: Self { .init(.missingVersionException) } /// The specified namespace wasn't found. 
public static var namespaceNotFoundException: Self { .init(.namespaceNotFoundException) } - /// There's no update available for this Amazon ECS container agent. This might be because the agent is - /// already running the latest version or because it's so old that there's no update path to the current - /// version. + /// There's no update available for this Amazon ECS container agent. This might be because the + /// agent is already running the latest version or because it's so old that there's no + /// update path to the current version. public static var noUpdateAvailableException: Self { .init(.noUpdateAvailableException) } /// The specified platform version doesn't satisfy the required capabilities of the task /// definition. @@ -8874,27 +9316,31 @@ public struct ECSErrorType: AWSErrorType { public static var resourceNotFoundException: Self { .init(.resourceNotFoundException) } /// These errors are usually caused by a server issue. public static var serverException: Self { .init(.serverException) } - /// The specified service isn't active. You can't update a service that's inactive. If you have - /// previously deleted a service, you can re-create it with CreateService. + /// The specified service isn't active. You can't update a service that's inactive. If you + /// have previously deleted a service, you can re-create it with CreateService. public static var serviceNotActiveException: Self { .init(.serviceNotActiveException) } - /// The specified service wasn't found. You can view your available services with ListServices. Amazon ECS services are cluster specific and Region specific. + /// The specified service wasn't found. You can view your available services with ListServices. Amazon ECS services are cluster specific and Region + /// specific. public static var serviceNotFoundException: Self { .init(.serviceNotFoundException) } - /// The execute command cannot run. 
This error can be caused by any of the following configuration - /// issues: Incorrect IAM permissions The SSM agent is not installed or is not running There is an interface Amazon VPC endpoint for Amazon ECS, but there is not one for Systems - /// Manager Session Manager For information about how to troubleshoot the issues, see Troubleshooting issues with ECS Exec in - /// the Amazon Elastic Container Service Developer Guide. + /// The execute command cannot run. This error can be caused by any of the following + /// configuration issues: Incorrect IAM permissions The SSM agent is not installed or is not running There is an interface Amazon VPC endpoint for Amazon ECS, but there is not one for + /// Systems Manager Session Manager For information about how to troubleshoot the issues, see Troubleshooting issues with ECS + /// Exec in the Amazon Elastic Container Service Developer Guide. public static var targetNotConnectedException: Self { .init(.targetNotConnectedException) } - /// The specified target wasn't found. You can view your available container instances with ListContainerInstances. Amazon ECS container instances are cluster-specific and + /// The specified target wasn't found. You can view your available container instances + /// with ListContainerInstances. Amazon ECS container instances are cluster-specific and /// Region-specific. public static var targetNotFoundException: Self { .init(.targetNotFoundException) } - /// The specified task set wasn't found. You can view your available task sets with DescribeTaskSets. Task sets are specific to each cluster, service and Region. + /// The specified task set wasn't found. You can view your available task sets with DescribeTaskSets. Task sets are specific to each cluster, service and + /// Region. public static var taskSetNotFoundException: Self { .init(.taskSetNotFoundException) } /// The specified task isn't supported in this Region. 
public static var unsupportedFeatureException: Self { .init(.unsupportedFeatureException) } - /// There's already a current Amazon ECS container agent update in progress on the container instance that's - /// specified. If the container agent becomes disconnected while it's in a transitional stage, such as - /// PENDING or STAGING, the update process can get stuck in that state. - /// However, when the agent reconnects, it resumes where it stopped previously. + /// There's already a current Amazon ECS container agent update in progress on the container + /// instance that's specified. If the container agent becomes disconnected while it's in a + /// transitional stage, such as PENDING or STAGING, the update + /// process can get stuck in that state. However, when the agent reconnects, it resumes + /// where it stopped previously. public static var updateInProgressException: Self { .init(.updateInProgressException) } } diff --git a/Sources/Soto/Services/EKS/EKS_api.swift b/Sources/Soto/Services/EKS/EKS_api.swift index 897ebb2194..d1a97599c4 100644 --- a/Sources/Soto/Services/EKS/EKS_api.swift +++ b/Sources/Soto/Services/EKS/EKS_api.swift @@ -524,6 +524,7 @@ public struct EKS: AWSService { /// - labels: The Kubernetes labels to apply to the nodes in the node group when they are created. /// - launchTemplate: An object representing a node group's launch template specification. When using this object, don't directly specify instanceTypes, diskSize, or remoteAccess. Make sure that the launch template meets the requirements in launchTemplateSpecification. Also refer to Customizing managed nodes with launch templates in the Amazon EKS User Guide. /// - nodegroupName: The unique name to give your node group. + /// - nodeRepairConfig: The node auto repair configuration for the node group. /// - nodeRole: The Amazon Resource Name (ARN) of the IAM role to associate with your node group. 
The Amazon EKS worker node kubelet daemon makes calls to Amazon Web Services APIs on your behalf. Nodes receive permissions for these API calls through an IAM instance profile and associated policies. Before you can launch nodes and register them into a cluster, you must create an IAM role for those nodes to use when they are launched. For more information, see Amazon EKS node IAM role in the Amazon EKS User Guide . If you specify launchTemplate, then don't specify IamInstanceProfile in your launch template, or the node group deployment will fail. For more information about using launch templates with Amazon EKS, see Customizing managed nodes with launch templates in the Amazon EKS User Guide. /// - releaseVersion: The AMI version of the Amazon EKS optimized AMI to use with your node group. By default, the latest available AMI version for the node group's current Kubernetes version is used. For information about Linux versions, see Amazon EKS optimized Amazon Linux AMI versions in the Amazon EKS User Guide. Amazon EKS managed node groups support the November 2022 and later releases of the Windows AMIs. For information about Windows versions, see Amazon EKS optimized Windows AMI versions in the Amazon EKS User Guide. If you specify launchTemplate, and your launch template uses a custom AMI, then don't specify releaseVersion, or the node group deployment will fail. For more information about using launch templates with Amazon EKS, see Customizing managed nodes with launch templates in the Amazon EKS User Guide. /// - remoteAccess: The remote access configuration to use with your node group. For Linux, the protocol is SSH. For Windows, the protocol is RDP. If you specify launchTemplate, then don't specify remoteAccess, or the node group deployment will fail. For more information about using launch templates with Amazon EKS, see Customizing managed nodes with launch templates in the Amazon EKS User Guide. 
@@ -545,6 +546,7 @@ public struct EKS: AWSService { labels: [String: String]? = nil, launchTemplate: LaunchTemplateSpecification? = nil, nodegroupName: String, + nodeRepairConfig: NodeRepairConfig? = nil, nodeRole: String, releaseVersion: String? = nil, remoteAccess: RemoteAccessConfig? = nil, @@ -566,6 +568,7 @@ public struct EKS: AWSService { labels: labels, launchTemplate: launchTemplate, nodegroupName: nodegroupName, + nodeRepairConfig: nodeRepairConfig, nodeRole: nodeRole, releaseVersion: releaseVersion, remoteAccess: remoteAccess, @@ -2148,6 +2151,7 @@ public struct EKS: AWSService { /// - clusterName: The name of your cluster. /// - labels: The Kubernetes labels to apply to the nodes in the node group after the update. /// - nodegroupName: The name of the managed node group to update. + /// - nodeRepairConfig: The node auto repair configuration for the node group. /// - scalingConfig: The scaling configuration details for the Auto Scaling group after the update. /// - taints: The Kubernetes taints to be applied to the nodes in the node group after the update. For more information, see Node taints on managed node groups. /// - updateConfig: The node group update configuration. @@ -2158,6 +2162,7 @@ public struct EKS: AWSService { clusterName: String, labels: UpdateLabelsPayload? = nil, nodegroupName: String, + nodeRepairConfig: NodeRepairConfig? = nil, scalingConfig: NodegroupScalingConfig? = nil, taints: UpdateTaintsPayload? = nil, updateConfig: NodegroupUpdateConfig? 
= nil, @@ -2168,6 +2173,7 @@ public struct EKS: AWSService { clusterName: clusterName, labels: labels, nodegroupName: nodegroupName, + nodeRepairConfig: nodeRepairConfig, scalingConfig: scalingConfig, taints: taints, updateConfig: updateConfig diff --git a/Sources/Soto/Services/EKS/EKS_shapes.swift b/Sources/Soto/Services/EKS/EKS_shapes.swift index bfc5793cf6..1d59f812de 100644 --- a/Sources/Soto/Services/EKS/EKS_shapes.swift +++ b/Sources/Soto/Services/EKS/EKS_shapes.swift @@ -322,6 +322,7 @@ extension EKS { case maxUnavailable = "MaxUnavailable" case maxUnavailablePercentage = "MaxUnavailablePercentage" case minSize = "MinSize" + case nodeRepairEnabled = "NodeRepairEnabled" case platformVersion = "PlatformVersion" case podIdentityAssociations = "PodIdentityAssociations" case publicAccessCidrs = "PublicAccessCidrs" @@ -1674,6 +1675,8 @@ extension EKS { public let launchTemplate: LaunchTemplateSpecification? /// The unique name to give your node group. public let nodegroupName: String + /// The node auto repair configuration for the node group. + public let nodeRepairConfig: NodeRepairConfig? /// The Amazon Resource Name (ARN) of the IAM role to associate with your node group. The Amazon EKS worker node kubelet daemon makes calls to Amazon Web Services APIs on your behalf. Nodes receive permissions for these API calls through an IAM instance profile and associated policies. Before you can launch nodes and register them into a cluster, you must create an IAM role for those nodes to use when they are launched. For more information, see Amazon EKS node IAM role in the Amazon EKS User Guide . If you specify launchTemplate, then don't specify IamInstanceProfile in your launch template, or the node group deployment will fail. For more information about using launch templates with Amazon EKS, see Customizing managed nodes with launch templates in the Amazon EKS User Guide. 
public let nodeRole: String /// The AMI version of the Amazon EKS optimized AMI to use with your node group. By default, the latest available AMI version for the node group's current Kubernetes version is used. For information about Linux versions, see Amazon EKS optimized Amazon Linux AMI versions in the Amazon EKS User Guide. Amazon EKS managed node groups support the November 2022 and later releases of the Windows AMIs. For information about Windows versions, see Amazon EKS optimized Windows AMI versions in the Amazon EKS User Guide. If you specify launchTemplate, and your launch template uses a custom AMI, then don't specify releaseVersion, or the node group deployment will fail. For more information about using launch templates with Amazon EKS, see Customizing managed nodes with launch templates in the Amazon EKS User Guide. @@ -1694,7 +1697,7 @@ extension EKS { public let version: String? @inlinable - public init(amiType: AMITypes? = nil, capacityType: CapacityTypes? = nil, clientRequestToken: String? = CreateNodegroupRequest.idempotencyToken(), clusterName: String, diskSize: Int? = nil, instanceTypes: [String]? = nil, labels: [String: String]? = nil, launchTemplate: LaunchTemplateSpecification? = nil, nodegroupName: String, nodeRole: String, releaseVersion: String? = nil, remoteAccess: RemoteAccessConfig? = nil, scalingConfig: NodegroupScalingConfig? = nil, subnets: [String], tags: [String: String]? = nil, taints: [Taint]? = nil, updateConfig: NodegroupUpdateConfig? = nil, version: String? = nil) { + public init(amiType: AMITypes? = nil, capacityType: CapacityTypes? = nil, clientRequestToken: String? = CreateNodegroupRequest.idempotencyToken(), clusterName: String, diskSize: Int? = nil, instanceTypes: [String]? = nil, labels: [String: String]? = nil, launchTemplate: LaunchTemplateSpecification? = nil, nodegroupName: String, nodeRepairConfig: NodeRepairConfig? = nil, nodeRole: String, releaseVersion: String? = nil, remoteAccess: RemoteAccessConfig? 
= nil, scalingConfig: NodegroupScalingConfig? = nil, subnets: [String], tags: [String: String]? = nil, taints: [Taint]? = nil, updateConfig: NodegroupUpdateConfig? = nil, version: String? = nil) { self.amiType = amiType self.capacityType = capacityType self.clientRequestToken = clientRequestToken @@ -1704,6 +1707,7 @@ extension EKS { self.labels = labels self.launchTemplate = launchTemplate self.nodegroupName = nodegroupName + self.nodeRepairConfig = nodeRepairConfig self.nodeRole = nodeRole self.releaseVersion = releaseVersion self.remoteAccess = remoteAccess @@ -1727,6 +1731,7 @@ extension EKS { try container.encodeIfPresent(self.labels, forKey: .labels) try container.encodeIfPresent(self.launchTemplate, forKey: .launchTemplate) try container.encode(self.nodegroupName, forKey: .nodegroupName) + try container.encodeIfPresent(self.nodeRepairConfig, forKey: .nodeRepairConfig) try container.encode(self.nodeRole, forKey: .nodeRole) try container.encodeIfPresent(self.releaseVersion, forKey: .releaseVersion) try container.encodeIfPresent(self.remoteAccess, forKey: .remoteAccess) @@ -1768,6 +1773,7 @@ extension EKS { case labels = "labels" case launchTemplate = "launchTemplate" case nodegroupName = "nodegroupName" + case nodeRepairConfig = "nodeRepairConfig" case nodeRole = "nodeRole" case releaseVersion = "releaseVersion" case remoteAccess = "remoteAccess" @@ -3977,6 +3983,20 @@ extension EKS { } } + public struct NodeRepairConfig: AWSEncodableShape & AWSDecodableShape { + /// Specifies whether to enable node auto repair for the node group. Node auto repair is disabled by default. + public let enabled: Bool? + + @inlinable + public init(enabled: Bool? = nil) { + self.enabled = enabled + } + + private enum CodingKeys: String, CodingKey { + case enabled = "enabled" + } + } + public struct Nodegroup: AWSDecodableShape { /// If the node group was deployed using a launch template with a custom AMI, then this is CUSTOM. 
For node groups that weren't deployed using a launch template, this is the AMI type that was specified in the node group configuration. public let amiType: AMITypes? @@ -4002,6 +4022,8 @@ extension EKS { public let nodegroupArn: String? /// The name associated with an Amazon EKS managed node group. public let nodegroupName: String? + /// The node auto repair configuration for the node group. + public let nodeRepairConfig: NodeRepairConfig? /// The IAM role associated with your node group. The Amazon EKS node kubelet daemon makes calls to Amazon Web Services APIs on your behalf. Nodes receive permissions for these API calls through an IAM instance profile and associated policies. public let nodeRole: String? /// If the node group was deployed using a launch template with a custom AMI, then this is the AMI ID that was specified in the launch template. For node groups that weren't deployed using a launch template, this is the version of the Amazon EKS optimized AMI that the node group was deployed with. @@ -4026,7 +4048,7 @@ extension EKS { public let version: String? @inlinable - public init(amiType: AMITypes? = nil, capacityType: CapacityTypes? = nil, clusterName: String? = nil, createdAt: Date? = nil, diskSize: Int? = nil, health: NodegroupHealth? = nil, instanceTypes: [String]? = nil, labels: [String: String]? = nil, launchTemplate: LaunchTemplateSpecification? = nil, modifiedAt: Date? = nil, nodegroupArn: String? = nil, nodegroupName: String? = nil, nodeRole: String? = nil, releaseVersion: String? = nil, remoteAccess: RemoteAccessConfig? = nil, resources: NodegroupResources? = nil, scalingConfig: NodegroupScalingConfig? = nil, status: NodegroupStatus? = nil, subnets: [String]? = nil, tags: [String: String]? = nil, taints: [Taint]? = nil, updateConfig: NodegroupUpdateConfig? = nil, version: String? = nil) { + public init(amiType: AMITypes? = nil, capacityType: CapacityTypes? = nil, clusterName: String? = nil, createdAt: Date? = nil, diskSize: Int? 
= nil, health: NodegroupHealth? = nil, instanceTypes: [String]? = nil, labels: [String: String]? = nil, launchTemplate: LaunchTemplateSpecification? = nil, modifiedAt: Date? = nil, nodegroupArn: String? = nil, nodegroupName: String? = nil, nodeRepairConfig: NodeRepairConfig? = nil, nodeRole: String? = nil, releaseVersion: String? = nil, remoteAccess: RemoteAccessConfig? = nil, resources: NodegroupResources? = nil, scalingConfig: NodegroupScalingConfig? = nil, status: NodegroupStatus? = nil, subnets: [String]? = nil, tags: [String: String]? = nil, taints: [Taint]? = nil, updateConfig: NodegroupUpdateConfig? = nil, version: String? = nil) { self.amiType = amiType self.capacityType = capacityType self.clusterName = clusterName @@ -4039,6 +4061,7 @@ extension EKS { self.modifiedAt = modifiedAt self.nodegroupArn = nodegroupArn self.nodegroupName = nodegroupName + self.nodeRepairConfig = nodeRepairConfig self.nodeRole = nodeRole self.releaseVersion = releaseVersion self.remoteAccess = remoteAccess @@ -4065,6 +4088,7 @@ extension EKS { case modifiedAt = "modifiedAt" case nodegroupArn = "nodegroupArn" case nodegroupName = "nodegroupName" + case nodeRepairConfig = "nodeRepairConfig" case nodeRole = "nodeRole" case releaseVersion = "releaseVersion" case remoteAccess = "remoteAccess" @@ -4500,9 +4524,9 @@ extension EKS { } public struct RemoteNetworkConfigRequest: AWSEncodableShape { - /// The list of network CIDRs that can contain hybrid nodes. + /// The list of network CIDRs that can contain hybrid nodes. These CIDR blocks define the expected IP address range of the hybrid nodes that join the cluster. These blocks are typically determined by your network administrator. Enter one or more IPv4 CIDR blocks in decimal dotted-quad notation (for example, 10.2.0.0/16). It must satisfy the following requirements: Each block must be within an IPv4 RFC-1918 network range. Minimum allowed size is /24, maximum allowed size is /8. Publicly-routable addresses aren't supported. 
Each block cannot overlap with the range of the VPC CIDR blocks for your EKS resources, or the block of the Kubernetes service IP range. Each block must have a route to the VPC that uses the VPC CIDR blocks, not public IPs or Elastic IPs. There are many options including Transit Gateway, Site-to-Site VPN, or Direct Connect. Each host must allow outbound connection to the EKS cluster control plane on TCP ports 443 and 10250. Each host must allow inbound connection from the EKS cluster control plane on TCP port 10250 for logs, exec and port-forward operations. Each host must allow TCP and UDP network connectivity to and from other hosts that are running CoreDNS on UDP port 53 for service and pod DNS names. public let remoteNodeNetworks: [RemoteNodeNetwork]? - /// The list of network CIDRs that can contain pods that run Kubernetes webhooks on hybrid nodes. + /// The list of network CIDRs that can contain pods that run Kubernetes webhooks on hybrid nodes. These CIDR blocks are determined by configuring your Container Network Interface (CNI) plugin. We recommend the Calico CNI or Cilium CNI. Note that the Amazon VPC CNI plugin for Kubernetes isn't available for on-premises and edge locations. Enter one or more IPv4 CIDR blocks in decimal dotted-quad notation (for example, 10.2.0.0/16). It must satisfy the following requirements: Each block must be within an IPv4 RFC-1918 network range. Minimum allowed size is /24, maximum allowed size is /8. Publicly-routable addresses aren't supported. Each block cannot overlap with the range of the VPC CIDR blocks for your EKS resources, or the block of the Kubernetes service IP range. public let remotePodNetworks: [RemotePodNetwork]? @inlinable @@ -4541,7 +4565,7 @@ extension EKS { } public struct RemoteNodeNetwork: AWSEncodableShape & AWSDecodableShape { - /// A network CIDR that can contain hybrid nodes. + /// A network CIDR that can contain hybrid nodes. 
These CIDR blocks define the expected IP address range of the hybrid nodes that join the cluster. These blocks are typically determined by your network administrator. Enter one or more IPv4 CIDR blocks in decimal dotted-quad notation (for example, 10.2.0.0/16). It must satisfy the following requirements: Each block must be within an IPv4 RFC-1918 network range. Minimum allowed size is /24, maximum allowed size is /8. Publicly-routable addresses aren't supported. Each block cannot overlap with the range of the VPC CIDR blocks for your EKS resources, or the block of the Kubernetes service IP range. Each block must have a route to the VPC that uses the VPC CIDR blocks, not public IPs or Elastic IPs. There are many options including Transit Gateway, Site-to-Site VPN, or Direct Connect. Each host must allow outbound connection to the EKS cluster control plane on TCP ports 443 and 10250. Each host must allow inbound connection from the EKS cluster control plane on TCP port 10250 for logs, exec and port-forward operations. Each host must allow TCP and UDP network connectivity to and from other hosts that are running CoreDNS on UDP port 53 for service and pod DNS names. public let cidrs: [String]? @inlinable @@ -4555,7 +4579,7 @@ extension EKS { } public struct RemotePodNetwork: AWSEncodableShape & AWSDecodableShape { - /// A network CIDR that can contain pods that run Kubernetes webhooks on hybrid nodes. + /// A network CIDR that can contain pods that run Kubernetes webhooks on hybrid nodes. These CIDR blocks are determined by configuring your Container Network Interface (CNI) plugin. We recommend the Calico CNI or Cilium CNI. Note that the Amazon VPC CNI plugin for Kubernetes isn't available for on-premises and edge locations. Enter one or more IPv4 CIDR blocks in decimal dotted-quad notation (for example, 10.2.0.0/16). It must satisfy the following requirements: Each block must be within an IPv4 RFC-1918 network range. 
Minimum allowed size is /24, maximum allowed size is /8. Publicly-routable addresses aren't supported. Each block cannot overlap with the range of the VPC CIDR blocks for your EKS resources, or the block of the Kubernetes service IP range. public let cidrs: [String]? @inlinable @@ -5073,6 +5097,8 @@ extension EKS { public let labels: UpdateLabelsPayload? /// The name of the managed node group to update. public let nodegroupName: String + /// The node auto repair configuration for the node group. + public let nodeRepairConfig: NodeRepairConfig? /// The scaling configuration details for the Auto Scaling group after the update. public let scalingConfig: NodegroupScalingConfig? /// The Kubernetes taints to be applied to the nodes in the node group after the update. For more information, see Node taints on managed node groups. @@ -5081,11 +5107,12 @@ extension EKS { public let updateConfig: NodegroupUpdateConfig? @inlinable - public init(clientRequestToken: String? = UpdateNodegroupConfigRequest.idempotencyToken(), clusterName: String, labels: UpdateLabelsPayload? = nil, nodegroupName: String, scalingConfig: NodegroupScalingConfig? = nil, taints: UpdateTaintsPayload? = nil, updateConfig: NodegroupUpdateConfig? = nil) { + public init(clientRequestToken: String? = UpdateNodegroupConfigRequest.idempotencyToken(), clusterName: String, labels: UpdateLabelsPayload? = nil, nodegroupName: String, nodeRepairConfig: NodeRepairConfig? = nil, scalingConfig: NodegroupScalingConfig? = nil, taints: UpdateTaintsPayload? = nil, updateConfig: NodegroupUpdateConfig? 
= nil) { self.clientRequestToken = clientRequestToken self.clusterName = clusterName self.labels = labels self.nodegroupName = nodegroupName + self.nodeRepairConfig = nodeRepairConfig self.scalingConfig = scalingConfig self.taints = taints self.updateConfig = updateConfig @@ -5098,6 +5125,7 @@ extension EKS { request.encodePath(self.clusterName, key: "clusterName") try container.encodeIfPresent(self.labels, forKey: .labels) request.encodePath(self.nodegroupName, key: "nodegroupName") + try container.encodeIfPresent(self.nodeRepairConfig, forKey: .nodeRepairConfig) try container.encodeIfPresent(self.scalingConfig, forKey: .scalingConfig) try container.encodeIfPresent(self.taints, forKey: .taints) try container.encodeIfPresent(self.updateConfig, forKey: .updateConfig) @@ -5113,6 +5141,7 @@ extension EKS { private enum CodingKeys: String, CodingKey { case clientRequestToken = "clientRequestToken" case labels = "labels" + case nodeRepairConfig = "nodeRepairConfig" case scalingConfig = "scalingConfig" case taints = "taints" case updateConfig = "updateConfig" diff --git a/Sources/Soto/Services/EMRServerless/EMRServerless_api.swift b/Sources/Soto/Services/EMRServerless/EMRServerless_api.swift index ec9800a870..078bfe2c70 100644 --- a/Sources/Soto/Services/EMRServerless/EMRServerless_api.swift +++ b/Sources/Soto/Services/EMRServerless/EMRServerless_api.swift @@ -275,18 +275,21 @@ public struct EMRServerless: AWSService { /// Creates and returns a URL that you can use to access the application UIs for a job run. For jobs in a running state, the application UI is a live user interface such as the Spark or Tez web UI. For completed jobs, the application UI is a persistent application user interface such as the Spark History Server or persistent Tez UI. The URL is valid for one hour after you generate it. To access the application UI after that hour elapses, you must invoke the API again to generate a new URL. 
/// /// Parameters: + /// - accessSystemProfileLogs: Allows access to system profile logs for Lake Formation-enabled jobs. Default is false. /// - applicationId: The ID of the application. /// - attempt: An optimal parameter that indicates the amount of attempts for the job. If not specified, this value defaults to the attempt of the latest job. /// - jobRunId: The ID of the job run. /// - logger: Logger use during operation @inlinable public func getDashboardForJobRun( + accessSystemProfileLogs: Bool? = nil, applicationId: String, attempt: Int? = nil, jobRunId: String, logger: Logger = AWSClient.loggingDisabled ) async throws -> GetDashboardForJobRunResponse { let input = GetDashboardForJobRunRequest( + accessSystemProfileLogs: accessSystemProfileLogs, applicationId: applicationId, attempt: attempt, jobRunId: jobRunId diff --git a/Sources/Soto/Services/EMRServerless/EMRServerless_shapes.swift b/Sources/Soto/Services/EMRServerless/EMRServerless_shapes.swift index 9d43383cf2..69341fd79d 100644 --- a/Sources/Soto/Services/EMRServerless/EMRServerless_shapes.swift +++ b/Sources/Soto/Services/EMRServerless/EMRServerless_shapes.swift @@ -669,6 +669,8 @@ extension EMRServerless { } public struct GetDashboardForJobRunRequest: AWSEncodableShape { + /// Allows access to system profile logs for Lake Formation-enabled jobs. Default is false. + public let accessSystemProfileLogs: Bool? /// The ID of the application. public let applicationId: String /// An optimal parameter that indicates the amount of attempts for the job. If not specified, this value defaults to the attempt of the latest job. @@ -677,7 +679,8 @@ extension EMRServerless { public let jobRunId: String @inlinable - public init(applicationId: String, attempt: Int? = nil, jobRunId: String) { + public init(accessSystemProfileLogs: Bool? = nil, applicationId: String, attempt: Int? 
= nil, jobRunId: String) { + self.accessSystemProfileLogs = accessSystemProfileLogs self.applicationId = applicationId self.attempt = attempt self.jobRunId = jobRunId @@ -686,6 +689,7 @@ extension EMRServerless { public func encode(to encoder: Encoder) throws { let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer _ = encoder.container(keyedBy: CodingKeys.self) + request.encodeQuery(self.accessSystemProfileLogs, key: "accessSystemProfileLogs") request.encodePath(self.applicationId, key: "applicationId") request.encodeQuery(self.attempt, key: "attempt") request.encodePath(self.jobRunId, key: "jobRunId") @@ -832,7 +836,7 @@ extension EMRServerless { public func validate(name: String) throws { try self.validate(self.imageUri, name: "imageUri", parent: name, max: 1024) try self.validate(self.imageUri, name: "imageUri", parent: name, min: 1) - try self.validate(self.imageUri, name: "imageUri", parent: name, pattern: "^([a-z0-9]+[a-z0-9-.]*)\\/((?:[a-z0-9]+(?:[._-][a-z0-9]+)*\\/)*[a-z0-9]+(?:[._-][a-z0-9]+)*)(?:\\:([a-zA-Z0-9_][a-zA-Z0-9-._]{0,299})|@(sha256:[0-9a-f]{64}))$") + try self.validate(self.imageUri, name: "imageUri", parent: name, pattern: "^([0-9]{12})\\.dkr\\.ecr\\.([a-z0-9-]+).([a-z0-9._-]+)\\/((?:[a-z0-9]+(?:[-._][a-z0-9]+)*/)*[a-z0-9]+(?:[-._][a-z0-9]+)*)(?::([a-zA-Z0-9_]+[a-zA-Z0-9-._]*)|@(sha256:[0-9a-f]{64}))$") } private enum CodingKeys: String, CodingKey { diff --git a/Sources/Soto/Services/Finspace/Finspace_shapes.swift b/Sources/Soto/Services/Finspace/Finspace_shapes.swift index 5e590317fe..489041d55f 100644 --- a/Sources/Soto/Services/Finspace/Finspace_shapes.swift +++ b/Sources/Soto/Services/Finspace/Finspace_shapes.swift @@ -3049,7 +3049,7 @@ extension Finspace { try self.validate(self.key, name: "key", parent: name, pattern: "^(?![Aa][Ww][Ss])(s|([a-zA-Z][a-zA-Z0-9_]+))|(AWS_ZIP_DEFAULT)$") try self.validate(self.value, name: "value", parent: name, max: 1024) try self.validate(self.value, name: "value", parent: name, min: 1) 
- try self.validate(self.value, name: "value", parent: name, pattern: "^[a-zA-Z0-9_:./,]+$") + try self.validate(self.value, name: "value", parent: name, pattern: "^[a-zA-Z0-9_:./,; ]+$") } private enum CodingKeys: String, CodingKey { diff --git a/Sources/Soto/Services/Glue/Glue_api.swift b/Sources/Soto/Services/Glue/Glue_api.swift index eb1d205cf0..1ac60a7196 100644 --- a/Sources/Soto/Services/Glue/Glue_api.swift +++ b/Sources/Soto/Services/Glue/Glue_api.swift @@ -1474,7 +1474,7 @@ public struct Glue: AWSService { /// - sourceControlDetails: The details for a source control configuration for a job, allowing synchronization of job artifacts to or from a remote repository. /// - tags: The tags to use with this job. You may use tags to limit access to the job. For more information about tags in Glue, see Amazon Web Services Tags in Glue in the developer guide. /// - timeout: The job timeout in minutes. This is the maximum time that a job run can consume resources before it is terminated and enters TIMEOUT status. The default is 2,880 minutes (48 hours) for batch jobs. Streaming jobs must have timeout values less than 7 days or 10080 minutes. When the value is left blank, the job will be restarted after 7 days based if you have not setup a maintenance window. If you have setup maintenance window, it will be restarted during the maintenance window after 7 days. - /// - workerType: The type of predefined worker that is allocated when a job runs. Accepts a value of G.1X, G.2X, G.4X, G.8X or G.025X for Spark jobs. Accepts the value Z.2X for Ray jobs. For the G.1X worker type, each worker maps to 1 DPU (4 vCPUs, 16 GB of memory) with 84GB disk (approximately 34GB free), and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offers a scalable and cost effective way to run most jobs. 
For the G.2X worker type, each worker maps to 2 DPU (8 vCPUs, 32 GB of memory) with 128GB disk (approximately 77GB free), and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offers a scalable and cost effective way to run most jobs. For the G.4X worker type, each worker maps to 4 DPU (16 vCPUs, 64 GB of memory) with 256GB disk (approximately 235GB free), and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs in the following Amazon Web Services Regions: US East (Ohio), US East (N. Virginia), US West (Oregon), Asia Pacific (Singapore), Asia Pacific (Sydney), Asia Pacific (Tokyo), Canada (Central), Europe (Frankfurt), Europe (Ireland), and Europe (Stockholm). For the G.8X worker type, each worker maps to 8 DPU (32 vCPUs, 128 GB of memory) with 512GB disk (approximately 487GB free), and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs, in the same Amazon Web Services Regions as supported for the G.4X worker type. For the G.025X worker type, each worker maps to 0.25 DPU (2 vCPUs, 4 GB of memory) with 84GB disk (approximately 34GB free), and provides 1 executor per worker. We recommend this worker type for low volume streaming jobs. This worker type is only available for Glue version 3.0 streaming jobs. For the Z.2X worker type, each worker maps to 2 M-DPU (8vCPUs, 64 GB of memory) with 128 GB disk (approximately 120GB free), and provides up to 8 Ray workers based on the autoscaler. + /// - workerType: The type of predefined worker that is allocated when a job runs. 
Accepts a value of G.1X, G.2X, G.4X, G.8X or G.025X for Spark jobs. Accepts the value Z.2X for Ray jobs. For the G.1X worker type, each worker maps to 1 DPU (4 vCPUs, 16 GB of memory) with 94GB disk, and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offers a scalable and cost effective way to run most jobs. For the G.2X worker type, each worker maps to 2 DPU (8 vCPUs, 32 GB of memory) with 138GB disk, and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offers a scalable and cost effective way to run most jobs. For the G.4X worker type, each worker maps to 4 DPU (16 vCPUs, 64 GB of memory) with 256GB disk, and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs in the following Amazon Web Services Regions: US East (Ohio), US East (N. Virginia), US West (Oregon), Asia Pacific (Singapore), Asia Pacific (Sydney), Asia Pacific (Tokyo), Canada (Central), Europe (Frankfurt), Europe (Ireland), and Europe (Stockholm). For the G.8X worker type, each worker maps to 8 DPU (32 vCPUs, 128 GB of memory) with 512GB disk, and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs, in the same Amazon Web Services Regions as supported for the G.4X worker type. For the G.025X worker type, each worker maps to 0.25 DPU (2 vCPUs, 4 GB of memory) with 84GB disk, and provides 1 executor per worker. We recommend this worker type for low volume streaming jobs. This worker type is only available for Glue version 3.0 or later streaming jobs. 
For the Z.2X worker type, each worker maps to 2 M-DPU (8vCPUs, 64 GB of memory) with 128 GB disk, and provides up to 8 Ray workers based on the autoscaler. /// - logger: Logger use during operation @inlinable public func createJob( @@ -1853,7 +1853,7 @@ public struct Glue: AWSService { /// - securityConfiguration: The name of the SecurityConfiguration structure to be used with the session /// - tags: The map of key value pairs (tags) belonging to the session. /// - timeout: The number of minutes before session times out. Default for Spark ETL jobs is 48 hours (2880 minutes), the maximum session lifetime for this job type. Consult the documentation for other job types. - /// - workerType: The type of predefined worker that is allocated when a job runs. Accepts a value of G.1X, G.2X, G.4X, or G.8X for Spark jobs. Accepts the value Z.2X for Ray notebooks. For the G.1X worker type, each worker maps to 1 DPU (4 vCPUs, 16 GB of memory) with 84GB disk (approximately 34GB free), and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offers a scalable and cost effective way to run most jobs. For the G.2X worker type, each worker maps to 2 DPU (8 vCPUs, 32 GB of memory) with 128GB disk (approximately 77GB free), and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offers a scalable and cost effective way to run most jobs. For the G.4X worker type, each worker maps to 4 DPU (16 vCPUs, 64 GB of memory) with 256GB disk (approximately 235GB free), and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs in the following Amazon Web Services Regions: US East (Ohio), US East (N. 
Virginia), US West (Oregon), Asia Pacific (Singapore), Asia Pacific (Sydney), Asia Pacific (Tokyo), Canada (Central), Europe (Frankfurt), Europe (Ireland), and Europe (Stockholm). For the G.8X worker type, each worker maps to 8 DPU (32 vCPUs, 128 GB of memory) with 512GB disk (approximately 487GB free), and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs, in the same Amazon Web Services Regions as supported for the G.4X worker type. For the Z.2X worker type, each worker maps to 2 M-DPU (8vCPUs, 64 GB of memory) with 128 GB disk (approximately 120GB free), and provides up to 8 Ray workers based on the autoscaler. + /// - workerType: The type of predefined worker that is allocated when a job runs. Accepts a value of G.1X, G.2X, G.4X, or G.8X for Spark jobs. Accepts the value Z.2X for Ray notebooks. For the G.1X worker type, each worker maps to 1 DPU (4 vCPUs, 16 GB of memory) with 94GB disk, and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offers a scalable and cost effective way to run most jobs. For the G.2X worker type, each worker maps to 2 DPU (8 vCPUs, 32 GB of memory) with 138GB disk, and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offers a scalable and cost effective way to run most jobs. For the G.4X worker type, each worker maps to 4 DPU (16 vCPUs, 64 GB of memory) with 256GB disk, and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs in the following Amazon Web Services Regions: US East (Ohio), US East (N. 
Virginia), US West (Oregon), Asia Pacific (Singapore), Asia Pacific (Sydney), Asia Pacific (Tokyo), Canada (Central), Europe (Frankfurt), Europe (Ireland), and Europe (Stockholm). For the G.8X worker type, each worker maps to 8 DPU (32 vCPUs, 128 GB of memory) with 512GB disk, and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs, in the same Amazon Web Services Regions as supported for the G.4X worker type. For the Z.2X worker type, each worker maps to 2 M-DPU (8vCPUs, 64 GB of memory) with 128 GB disk, and provides up to 8 Ray workers based on the autoscaler. /// - logger: Logger use during operation @inlinable public func createSession( @@ -1979,7 +1979,7 @@ public struct Glue: AWSService { return try await self.createTableOptimizer(input, logger: logger) } - /// Creates a new trigger. + /// Creates a new trigger. Job arguments may be logged. Do not pass plaintext secrets as arguments. Retrieve secrets from a Glue Connection, Amazon Web Services Secrets Manager or other secret management mechanism if you intend to keep them within the Job. @Sendable @inlinable public func createTrigger(_ input: CreateTriggerRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateTriggerResponse { @@ -1992,7 +1992,7 @@ public struct Glue: AWSService { logger: logger ) } - /// Creates a new trigger. + /// Creates a new trigger. Job arguments may be logged. Do not pass plaintext secrets as arguments. Retrieve secrets from a Glue Connection, Amazon Web Services Secrets Manager or other secret management mechanism if you intend to keep them within the Job. /// /// Parameters: /// - actions: The actions initiated by this trigger when it fires. @@ -2124,7 +2124,7 @@ public struct Glue: AWSService { /// Creates a new workflow. 
/// /// Parameters: - /// - defaultRunProperties: A collection of properties to be used as part of each execution of the workflow. + /// - defaultRunProperties: A collection of properties to be used as part of each execution of the workflow. Run properties may be logged. Do not pass plaintext secrets as properties. Retrieve secrets from a Glue Connection, Amazon Web Services Secrets Manager or other secret management mechanism if you intend to use them within the workflow run. /// - description: A description of the workflow. /// - maxConcurrentRuns: You can use this parameter to prevent unwanted multiple updates to data, to control costs, or in some cases, to prevent exceeding the maximum number of concurrent runs of any of the component jobs. If you leave this parameter blank, there is no limit to the number of concurrent workflow runs. /// - name: The name to be assigned to the workflow. It should be unique within your account. @@ -4478,7 +4478,7 @@ public struct Glue: AWSService { return try await self.getJobBookmark(input, logger: logger) } - /// Retrieves the metadata for a given job run. Job run history is accessible for 90 days for your workflow and job run. + /// Retrieves the metadata for a given job run. Job run history is accessible for 365 days for your workflow and job run. @Sendable @inlinable public func getJobRun(_ input: GetJobRunRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> GetJobRunResponse { @@ -4491,7 +4491,7 @@ public struct Glue: AWSService { logger: logger ) } - /// Retrieves the metadata for a given job run. Job run history is accessible for 90 days for your workflow and job run. + /// Retrieves the metadata for a given job run. Job run history is accessible for 365 days for your workflow and job run. /// /// Parameters: /// - jobName: Name of the job definition being run. 
@@ -4513,7 +4513,7 @@ public struct Glue: AWSService { return try await self.getJobRun(input, logger: logger) } - /// Retrieves metadata for all runs of a given job definition. + /// Retrieves metadata for all runs of a given job definition. GetJobRuns returns the job runs in chronological order, with the newest jobs returned first. @Sendable @inlinable public func getJobRuns(_ input: GetJobRunsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> GetJobRunsResponse { @@ -4526,7 +4526,7 @@ public struct Glue: AWSService { logger: logger ) } - /// Retrieves metadata for all runs of a given job definition. + /// Retrieves metadata for all runs of a given job definition. GetJobRuns returns the job runs in chronological order, with the newest jobs returned first. /// /// Parameters: /// - jobName: The name of the job definition for which to retrieve all job runs. @@ -7133,7 +7133,7 @@ public struct Glue: AWSService { /// Parameters: /// - name: Name of the workflow which was run. /// - runId: The ID of the workflow run for which the run properties should be updated. - /// - runProperties: The properties to put for the specified run. + /// - runProperties: The properties to put for the specified run. Run properties may be logged. Do not pass plaintext secrets as properties. Retrieve secrets from a Glue Connection, Amazon Web Services Secrets Manager or other secret management mechanism if you intend to use them within the workflow run. /// - logger: Logger use during operation @inlinable public func putWorkflowRunProperties( @@ -7778,7 +7778,7 @@ public struct Glue: AWSService { /// - numberOfWorkers: The number of workers of a defined workerType that are allocated when a job runs. /// - securityConfiguration: The name of the SecurityConfiguration structure to be used with this job run. /// - timeout: The JobRun timeout in minutes. This is the maximum time that a job run can consume resources before it is terminated and enters TIMEOUT status. 
This value overrides the timeout value set in the parent job. Streaming jobs must have timeout values less than 7 days or 10080 minutes. When the value is left blank, the job will be restarted after 7 days based if you have not setup a maintenance window. If you have setup maintenance window, it will be restarted during the maintenance window after 7 days. - /// - workerType: The type of predefined worker that is allocated when a job runs. Accepts a value of G.1X, G.2X, G.4X, G.8X or G.025X for Spark jobs. Accepts the value Z.2X for Ray jobs. For the G.1X worker type, each worker maps to 1 DPU (4 vCPUs, 16 GB of memory) with 84GB disk (approximately 34GB free), and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offers a scalable and cost effective way to run most jobs. For the G.2X worker type, each worker maps to 2 DPU (8 vCPUs, 32 GB of memory) with 128GB disk (approximately 77GB free), and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offers a scalable and cost effective way to run most jobs. For the G.4X worker type, each worker maps to 4 DPU (16 vCPUs, 64 GB of memory) with 256GB disk (approximately 235GB free), and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs in the following Amazon Web Services Regions: US East (Ohio), US East (N. Virginia), US West (Oregon), Asia Pacific (Singapore), Asia Pacific (Sydney), Asia Pacific (Tokyo), Canada (Central), Europe (Frankfurt), Europe (Ireland), and Europe (Stockholm). For the G.8X worker type, each worker maps to 8 DPU (32 vCPUs, 128 GB of memory) with 512GB disk (approximately 487GB free), and provides 1 executor per worker. 
We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs, in the same Amazon Web Services Regions as supported for the G.4X worker type. For the G.025X worker type, each worker maps to 0.25 DPU (2 vCPUs, 4 GB of memory) with 84GB disk (approximately 34GB free), and provides 1 executor per worker. We recommend this worker type for low volume streaming jobs. This worker type is only available for Glue version 3.0 streaming jobs. For the Z.2X worker type, each worker maps to 2 M-DPU (8vCPUs, 64 GB of memory) with 128 GB disk (approximately 120GB free), and provides up to 8 Ray workers based on the autoscaler. + /// - workerType: The type of predefined worker that is allocated when a job runs. Accepts a value of G.1X, G.2X, G.4X, G.8X or G.025X for Spark jobs. Accepts the value Z.2X for Ray jobs. For the G.1X worker type, each worker maps to 1 DPU (4 vCPUs, 16 GB of memory) with 94GB disk, and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offers a scalable and cost effective way to run most jobs. For the G.2X worker type, each worker maps to 2 DPU (8 vCPUs, 32 GB of memory) with 138GB disk, and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offers a scalable and cost effective way to run most jobs. For the G.4X worker type, each worker maps to 4 DPU (16 vCPUs, 64 GB of memory) with 256GB disk, and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs in the following Amazon Web Services Regions: US East (Ohio), US East (N. 
Virginia), US West (Oregon), Asia Pacific (Singapore), Asia Pacific (Sydney), Asia Pacific (Tokyo), Canada (Central), Europe (Frankfurt), Europe (Ireland), and Europe (Stockholm). For the G.8X worker type, each worker maps to 8 DPU (32 vCPUs, 128 GB of memory) with 512GB disk, and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs, in the same Amazon Web Services Regions as supported for the G.4X worker type. For the G.025X worker type, each worker maps to 0.25 DPU (2 vCPUs, 4 GB of memory) with 84GB disk, and provides 1 executor per worker. We recommend this worker type for low volume streaming jobs. This worker type is only available for Glue version 3.0 or later streaming jobs. For the Z.2X worker type, each worker maps to 2 M-DPU (8vCPUs, 64 GB of memory) with 128 GB disk, and provides up to 8 Ray workers based on the autoscaler. /// - logger: Logger use during operation @inlinable public func startJobRun( @@ -7918,7 +7918,7 @@ public struct Glue: AWSService { /// /// Parameters: /// - name: The name of the workflow to start. - /// - runProperties: The workflow run properties for the new workflow run. + /// - runProperties: The workflow run properties for the new workflow run. Run properties may be logged. Do not pass plaintext secrets as properties. Retrieve secrets from a Glue Connection, Amazon Web Services Secrets Manager or other secret management mechanism if you intend to use them within the workflow run. /// - logger: Logger use during operation @inlinable public func startWorkflowRun( @@ -9208,7 +9208,7 @@ public struct Glue: AWSService { return try await self.updateTableOptimizer(input, logger: logger) } - /// Updates a trigger definition. + /// Updates a trigger definition. Job arguments may be logged. Do not pass plaintext secrets as arguments. 
Retrieve secrets from a Glue Connection, Amazon Web Services Secrets Manager or other secret management mechanism if you intend to keep them within the Job. @Sendable @inlinable public func updateTrigger(_ input: UpdateTriggerRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> UpdateTriggerResponse { @@ -9221,7 +9221,7 @@ public struct Glue: AWSService { logger: logger ) } - /// Updates a trigger definition. + /// Updates a trigger definition. Job arguments may be logged. Do not pass plaintext secrets as arguments. Retrieve secrets from a Glue Connection, Amazon Web Services Secrets Manager or other secret management mechanism if you intend to keep them within the Job. /// /// Parameters: /// - name: The name of the trigger to update. @@ -9329,7 +9329,7 @@ public struct Glue: AWSService { /// Updates an existing workflow. /// /// Parameters: - /// - defaultRunProperties: A collection of properties to be used as part of each execution of the workflow. + /// - defaultRunProperties: A collection of properties to be used as part of each execution of the workflow. Run properties may be logged. Do not pass plaintext secrets as properties. Retrieve secrets from a Glue Connection, Amazon Web Services Secrets Manager or other secret management mechanism if you intend to use them within the workflow run. /// - description: The description of the workflow. /// - maxConcurrentRuns: You can use this parameter to prevent unwanted multiple updates to data, to control costs, or in some cases, to prevent exceeding the maximum number of concurrent runs of any of the component jobs. If you leave this parameter blank, there is no limit to the number of concurrent workflow runs. /// - name: Name of the workflow to be updated. 
diff --git a/Sources/Soto/Services/Glue/Glue_shapes.swift b/Sources/Soto/Services/Glue/Glue_shapes.swift index f46394206f..3af22a0d2f 100644 --- a/Sources/Soto/Services/Glue/Glue_shapes.swift +++ b/Sources/Soto/Services/Glue/Glue_shapes.swift @@ -326,6 +326,12 @@ extension Glue { public var description: String { return self.rawValue } } + public enum DataQualityEncryptionMode: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case disabled = "DISABLED" + case ssekms = "SSE-KMS" + public var description: String { return self.rawValue } + } + public enum DataQualityModelStatus: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case failed = "FAILED" case running = "RUNNING" @@ -6337,7 +6343,7 @@ extension Glue { public let tags: [String: String]? /// The job timeout in minutes. This is the maximum time that a job run can consume resources before it is terminated and enters TIMEOUT status. The default is 2,880 minutes (48 hours) for batch jobs. Streaming jobs must have timeout values less than 7 days or 10080 minutes. When the value is left blank, the job will be restarted after 7 days based if you have not setup a maintenance window. If you have setup maintenance window, it will be restarted during the maintenance window after 7 days. public let timeout: Int? - /// The type of predefined worker that is allocated when a job runs. Accepts a value of G.1X, G.2X, G.4X, G.8X or G.025X for Spark jobs. Accepts the value Z.2X for Ray jobs. For the G.1X worker type, each worker maps to 1 DPU (4 vCPUs, 16 GB of memory) with 84GB disk (approximately 34GB free), and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offers a scalable and cost effective way to run most jobs. For the G.2X worker type, each worker maps to 2 DPU (8 vCPUs, 32 GB of memory) with 128GB disk (approximately 77GB free), and provides 1 executor per worker. 
We recommend this worker type for workloads such as data transforms, joins, and queries, to offers a scalable and cost effective way to run most jobs. For the G.4X worker type, each worker maps to 4 DPU (16 vCPUs, 64 GB of memory) with 256GB disk (approximately 235GB free), and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs in the following Amazon Web Services Regions: US East (Ohio), US East (N. Virginia), US West (Oregon), Asia Pacific (Singapore), Asia Pacific (Sydney), Asia Pacific (Tokyo), Canada (Central), Europe (Frankfurt), Europe (Ireland), and Europe (Stockholm). For the G.8X worker type, each worker maps to 8 DPU (32 vCPUs, 128 GB of memory) with 512GB disk (approximately 487GB free), and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs, in the same Amazon Web Services Regions as supported for the G.4X worker type. For the G.025X worker type, each worker maps to 0.25 DPU (2 vCPUs, 4 GB of memory) with 84GB disk (approximately 34GB free), and provides 1 executor per worker. We recommend this worker type for low volume streaming jobs. This worker type is only available for Glue version 3.0 streaming jobs. For the Z.2X worker type, each worker maps to 2 M-DPU (8vCPUs, 64 GB of memory) with 128 GB disk (approximately 120GB free), and provides up to 8 Ray workers based on the autoscaler. + /// The type of predefined worker that is allocated when a job runs. Accepts a value of G.1X, G.2X, G.4X, G.8X or G.025X for Spark jobs. Accepts the value Z.2X for Ray jobs. 
For the G.1X worker type, each worker maps to 1 DPU (4 vCPUs, 16 GB of memory) with 94GB disk, and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offers a scalable and cost effective way to run most jobs. For the G.2X worker type, each worker maps to 2 DPU (8 vCPUs, 32 GB of memory) with 138GB disk, and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offers a scalable and cost effective way to run most jobs. For the G.4X worker type, each worker maps to 4 DPU (16 vCPUs, 64 GB of memory) with 256GB disk, and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs in the following Amazon Web Services Regions: US East (Ohio), US East (N. Virginia), US West (Oregon), Asia Pacific (Singapore), Asia Pacific (Sydney), Asia Pacific (Tokyo), Canada (Central), Europe (Frankfurt), Europe (Ireland), and Europe (Stockholm). For the G.8X worker type, each worker maps to 8 DPU (32 vCPUs, 128 GB of memory) with 512GB disk, and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs, in the same Amazon Web Services Regions as supported for the G.4X worker type. For the G.025X worker type, each worker maps to 0.25 DPU (2 vCPUs, 4 GB of memory) with 84GB disk, and provides 1 executor per worker. We recommend this worker type for low volume streaming jobs. This worker type is only available for Glue version 3.0 or later streaming jobs. 
For the Z.2X worker type, each worker maps to 2 M-DPU (8vCPUs, 64 GB of memory) with 128 GB disk, and provides up to 8 Ray workers based on the autoscaler. public let workerType: WorkerType? @inlinable @@ -6986,7 +6992,7 @@ extension Glue { public let tags: [String: String]? /// The number of minutes before session times out. Default for Spark ETL jobs is 48 hours (2880 minutes), the maximum session lifetime for this job type. Consult the documentation for other job types. public let timeout: Int? - /// The type of predefined worker that is allocated when a job runs. Accepts a value of G.1X, G.2X, G.4X, or G.8X for Spark jobs. Accepts the value Z.2X for Ray notebooks. For the G.1X worker type, each worker maps to 1 DPU (4 vCPUs, 16 GB of memory) with 84GB disk (approximately 34GB free), and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offers a scalable and cost effective way to run most jobs. For the G.2X worker type, each worker maps to 2 DPU (8 vCPUs, 32 GB of memory) with 128GB disk (approximately 77GB free), and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offers a scalable and cost effective way to run most jobs. For the G.4X worker type, each worker maps to 4 DPU (16 vCPUs, 64 GB of memory) with 256GB disk (approximately 235GB free), and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs in the following Amazon Web Services Regions: US East (Ohio), US East (N. Virginia), US West (Oregon), Asia Pacific (Singapore), Asia Pacific (Sydney), Asia Pacific (Tokyo), Canada (Central), Europe (Frankfurt), Europe (Ireland), and Europe (Stockholm). 
For the G.8X worker type, each worker maps to 8 DPU (32 vCPUs, 128 GB of memory) with 512GB disk (approximately 487GB free), and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs, in the same Amazon Web Services Regions as supported for the G.4X worker type. For the Z.2X worker type, each worker maps to 2 M-DPU (8vCPUs, 64 GB of memory) with 128 GB disk (approximately 120GB free), and provides up to 8 Ray workers based on the autoscaler. + /// The type of predefined worker that is allocated when a job runs. Accepts a value of G.1X, G.2X, G.4X, or G.8X for Spark jobs. Accepts the value Z.2X for Ray notebooks. For the G.1X worker type, each worker maps to 1 DPU (4 vCPUs, 16 GB of memory) with 94GB disk, and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offers a scalable and cost effective way to run most jobs. For the G.2X worker type, each worker maps to 2 DPU (8 vCPUs, 32 GB of memory) with 138GB disk, and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offers a scalable and cost effective way to run most jobs. For the G.4X worker type, each worker maps to 4 DPU (16 vCPUs, 64 GB of memory) with 256GB disk, and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs in the following Amazon Web Services Regions: US East (Ohio), US East (N. Virginia), US West (Oregon), Asia Pacific (Singapore), Asia Pacific (Sydney), Asia Pacific (Tokyo), Canada (Central), Europe (Frankfurt), Europe (Ireland), and Europe (Stockholm). 
For the G.8X worker type, each worker maps to 8 DPU (32 vCPUs, 128 GB of memory) with 512GB disk, and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs, in the same Amazon Web Services Regions as supported for the G.4X worker type. For the Z.2X worker type, each worker maps to 2 M-DPU (8vCPUs, 64 GB of memory) with 128 GB disk, and provides up to 8 Ray workers based on the autoscaler. public let workerType: WorkerType? @inlinable @@ -7359,7 +7365,7 @@ extension Glue { } public struct CreateWorkflowRequest: AWSEncodableShape { - /// A collection of properties to be used as part of each execution of the workflow. + /// A collection of properties to be used as part of each execution of the workflow. Run properties may be logged. Do not pass plaintext secrets as properties. Retrieve secrets from a Glue Connection, Amazon Web Services Secrets Manager or other secret management mechanism if you intend to use them within the workflow run. public let defaultRunProperties: [String: String]? /// A description of the workflow. public let description: String? @@ -7762,6 +7768,28 @@ extension Glue { } } + public struct DataQualityEncryption: AWSEncodableShape & AWSDecodableShape { + /// The encryption mode to use for encrypting Data Quality assets. These assets include data quality rulesets, results, statistics, anomaly detection models and observations. Valid values are SSEKMS for encryption using a customer-managed KMS key, or DISABLED. + public let dataQualityEncryptionMode: DataQualityEncryptionMode? + /// The Amazon Resource Name (ARN) of the KMS key to be used to encrypt the data. + public let kmsKeyArn: String? + + @inlinable + public init(dataQualityEncryptionMode: DataQualityEncryptionMode? = nil, kmsKeyArn: String? 
= nil) { + self.dataQualityEncryptionMode = dataQualityEncryptionMode + self.kmsKeyArn = kmsKeyArn + } + + public func validate(name: String) throws { + try self.validate(self.kmsKeyArn, name: "kmsKeyArn", parent: name, pattern: "^arn:aws:kms:") + } + + private enum CodingKeys: String, CodingKey { + case dataQualityEncryptionMode = "DataQualityEncryptionMode" + case kmsKeyArn = "KmsKeyArn" + } + } + public struct DataQualityEvaluationRunAdditionalRunOptions: AWSEncodableShape & AWSDecodableShape { /// Whether or not to enable CloudWatch metrics. public let cloudWatchMetricsEnabled: Bool? @@ -10466,20 +10494,24 @@ extension Glue { public struct EncryptionConfiguration: AWSEncodableShape & AWSDecodableShape { /// The encryption configuration for Amazon CloudWatch. public let cloudWatchEncryption: CloudWatchEncryption? + /// The encryption configuration for Glue Data Quality assets. + public let dataQualityEncryption: DataQualityEncryption? /// The encryption configuration for job bookmarks. public let jobBookmarksEncryption: JobBookmarksEncryption? /// The encryption configuration for Amazon Simple Storage Service (Amazon S3) data. public let s3Encryption: [S3Encryption]? @inlinable - public init(cloudWatchEncryption: CloudWatchEncryption? = nil, jobBookmarksEncryption: JobBookmarksEncryption? = nil, s3Encryption: [S3Encryption]? = nil) { + public init(cloudWatchEncryption: CloudWatchEncryption? = nil, dataQualityEncryption: DataQualityEncryption? = nil, jobBookmarksEncryption: JobBookmarksEncryption? = nil, s3Encryption: [S3Encryption]? 
= nil) { self.cloudWatchEncryption = cloudWatchEncryption + self.dataQualityEncryption = dataQualityEncryption self.jobBookmarksEncryption = jobBookmarksEncryption self.s3Encryption = s3Encryption } public func validate(name: String) throws { try self.cloudWatchEncryption?.validate(name: "\(name).cloudWatchEncryption") + try self.dataQualityEncryption?.validate(name: "\(name).dataQualityEncryption") try self.jobBookmarksEncryption?.validate(name: "\(name).jobBookmarksEncryption") try self.s3Encryption?.forEach { try $0.validate(name: "\(name).s3Encryption[]") @@ -10488,6 +10520,7 @@ extension Glue { private enum CodingKeys: String, CodingKey { case cloudWatchEncryption = "CloudWatchEncryption" + case dataQualityEncryption = "DataQualityEncryption" case jobBookmarksEncryption = "JobBookmarksEncryption" case s3Encryption = "S3Encryption" } @@ -16193,7 +16226,7 @@ extension Glue { public let sourceControlDetails: SourceControlDetails? /// The job timeout in minutes. This is the maximum time that a job run can consume resources before it is terminated and enters TIMEOUT status. The default is 2,880 minutes (48 hours) for batch jobs. Streaming jobs must have timeout values less than 7 days or 10080 minutes. When the value is left blank, the job will be restarted after 7 days based if you have not setup a maintenance window. If you have setup maintenance window, it will be restarted during the maintenance window after 7 days. public let timeout: Int? - /// The type of predefined worker that is allocated when a job runs. Accepts a value of G.1X, G.2X, G.4X, G.8X or G.025X for Spark jobs. Accepts the value Z.2X for Ray jobs. For the G.1X worker type, each worker maps to 1 DPU (4 vCPUs, 16 GB of memory) with 84GB disk (approximately 34GB free), and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offers a scalable and cost effective way to run most jobs. 
For the G.2X worker type, each worker maps to 2 DPU (8 vCPUs, 32 GB of memory) with 128GB disk (approximately 77GB free), and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offers a scalable and cost effective way to run most jobs. For the G.4X worker type, each worker maps to 4 DPU (16 vCPUs, 64 GB of memory) with 256GB disk (approximately 235GB free), and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs in the following Amazon Web Services Regions: US East (Ohio), US East (N. Virginia), US West (Oregon), Asia Pacific (Singapore), Asia Pacific (Sydney), Asia Pacific (Tokyo), Canada (Central), Europe (Frankfurt), Europe (Ireland), and Europe (Stockholm). For the G.8X worker type, each worker maps to 8 DPU (32 vCPUs, 128 GB of memory) with 512GB disk (approximately 487GB free), and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs, in the same Amazon Web Services Regions as supported for the G.4X worker type. For the G.025X worker type, each worker maps to 0.25 DPU (2 vCPUs, 4 GB of memory) with 84GB disk (approximately 34GB free), and provides 1 executor per worker. We recommend this worker type for low volume streaming jobs. This worker type is only available for Glue version 3.0 streaming jobs. For the Z.2X worker type, each worker maps to 2 M-DPU (8vCPUs, 64 GB of memory) with 128 GB disk (approximately 120GB free), and provides up to 8 Ray workers based on the autoscaler. + /// The type of predefined worker that is allocated when a job runs. Accepts a value of G.1X, G.2X, G.4X, G.8X or G.025X for Spark jobs. 
Accepts the value Z.2X for Ray jobs. For the G.1X worker type, each worker maps to 1 DPU (4 vCPUs, 16 GB of memory) with 94GB disk, and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offers a scalable and cost effective way to run most jobs. For the G.2X worker type, each worker maps to 2 DPU (8 vCPUs, 32 GB of memory) with 138GB disk, and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offers a scalable and cost effective way to run most jobs. For the G.4X worker type, each worker maps to 4 DPU (16 vCPUs, 64 GB of memory) with 256GB disk, and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs in the following Amazon Web Services Regions: US East (Ohio), US East (N. Virginia), US West (Oregon), Asia Pacific (Singapore), Asia Pacific (Sydney), Asia Pacific (Tokyo), Canada (Central), Europe (Frankfurt), Europe (Ireland), and Europe (Stockholm). For the G.8X worker type, each worker maps to 8 DPU (32 vCPUs, 128 GB of memory) with 512GB disk, and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs, in the same Amazon Web Services Regions as supported for the G.4X worker type. For the G.025X worker type, each worker maps to 0.25 DPU (2 vCPUs, 4 GB of memory) with 84GB disk, and provides 1 executor per worker. We recommend this worker type for low volume streaming jobs. This worker type is only available for Glue version 3.0 or later streaming jobs. 
For the Z.2X worker type, each worker maps to 2 M-DPU (8vCPUs, 64 GB of memory) with 128 GB disk, and provides up to 8 Ray workers based on the autoscaler. public let workerType: WorkerType? @inlinable @@ -16454,7 +16487,7 @@ extension Glue { public let timeout: Int? /// The name of the trigger that started this job run. public let triggerName: String? - /// The type of predefined worker that is allocated when a job runs. Accepts a value of G.1X, G.2X, G.4X, G.8X or G.025X for Spark jobs. Accepts the value Z.2X for Ray jobs. For the G.1X worker type, each worker maps to 1 DPU (4 vCPUs, 16 GB of memory) with 84GB disk (approximately 34GB free), and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offers a scalable and cost effective way to run most jobs. For the G.2X worker type, each worker maps to 2 DPU (8 vCPUs, 32 GB of memory) with 128GB disk (approximately 77GB free), and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offers a scalable and cost effective way to run most jobs. For the G.4X worker type, each worker maps to 4 DPU (16 vCPUs, 64 GB of memory) with 256GB disk (approximately 235GB free), and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs in the following Amazon Web Services Regions: US East (Ohio), US East (N. Virginia), US West (Oregon), Asia Pacific (Singapore), Asia Pacific (Sydney), Asia Pacific (Tokyo), Canada (Central), Europe (Frankfurt), Europe (Ireland), and Europe (Stockholm). For the G.8X worker type, each worker maps to 8 DPU (32 vCPUs, 128 GB of memory) with 512GB disk (approximately 487GB free), and provides 1 executor per worker. 
We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs, in the same Amazon Web Services Regions as supported for the G.4X worker type. For the G.025X worker type, each worker maps to 0.25 DPU (2 vCPUs, 4 GB of memory) with 84GB disk (approximately 34GB free), and provides 1 executor per worker. We recommend this worker type for low volume streaming jobs. This worker type is only available for Glue version 3.0 streaming jobs. For the Z.2X worker type, each worker maps to 2 M-DPU (8vCPUs, 64 GB of memory) with 128 GB disk (approximately 120GB free), and provides up to 8 Ray workers based on the autoscaler. + /// The type of predefined worker that is allocated when a job runs. Accepts a value of G.1X, G.2X, G.4X, G.8X or G.025X for Spark jobs. Accepts the value Z.2X for Ray jobs. For the G.1X worker type, each worker maps to 1 DPU (4 vCPUs, 16 GB of memory) with 94GB disk, and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offers a scalable and cost effective way to run most jobs. For the G.2X worker type, each worker maps to 2 DPU (8 vCPUs, 32 GB of memory) with 138GB disk, and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offers a scalable and cost effective way to run most jobs. For the G.4X worker type, each worker maps to 4 DPU (16 vCPUs, 64 GB of memory) with 256GB disk, and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs in the following Amazon Web Services Regions: US East (Ohio), US East (N. 
Virginia), US West (Oregon), Asia Pacific (Singapore), Asia Pacific (Sydney), Asia Pacific (Tokyo), Canada (Central), Europe (Frankfurt), Europe (Ireland), and Europe (Stockholm). For the G.8X worker type, each worker maps to 8 DPU (32 vCPUs, 128 GB of memory) with 512GB disk, and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs, in the same Amazon Web Services Regions as supported for the G.4X worker type. For the G.025X worker type, each worker maps to 0.25 DPU (2 vCPUs, 4 GB of memory) with 84GB disk, and provides 1 executor per worker. We recommend this worker type for low volume streaming jobs. This worker type is only available for Glue version 3.0 or later streaming jobs. For the Z.2X worker type, each worker maps to 2 M-DPU (8vCPUs, 64 GB of memory) with 128 GB disk, and provides up to 8 Ray workers based on the autoscaler. public let workerType: WorkerType? @inlinable @@ -16602,7 +16635,7 @@ extension Glue { public let sourceControlDetails: SourceControlDetails? /// The job timeout in minutes. This is the maximum time that a job run can consume resources before it is terminated and enters TIMEOUT status. The default is 2,880 minutes (48 hours) for batch jobs. Streaming jobs must have timeout values less than 7 days or 10080 minutes. When the value is left blank, the job will be restarted after 7 days based if you have not setup a maintenance window. If you have setup maintenance window, it will be restarted during the maintenance window after 7 days. public let timeout: Int? - /// The type of predefined worker that is allocated when a job runs. Accepts a value of G.1X, G.2X, G.4X, G.8X or G.025X for Spark jobs. Accepts the value Z.2X for Ray jobs. 
For the G.1X worker type, each worker maps to 1 DPU (4 vCPUs, 16 GB of memory) with 84GB disk (approximately 34GB free), and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offers a scalable and cost effective way to run most jobs. For the G.2X worker type, each worker maps to 2 DPU (8 vCPUs, 32 GB of memory) with 128GB disk (approximately 77GB free), and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offers a scalable and cost effective way to run most jobs. For the G.4X worker type, each worker maps to 4 DPU (16 vCPUs, 64 GB of memory) with 256GB disk (approximately 235GB free), and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs in the following Amazon Web Services Regions: US East (Ohio), US East (N. Virginia), US West (Oregon), Asia Pacific (Singapore), Asia Pacific (Sydney), Asia Pacific (Tokyo), Canada (Central), Europe (Frankfurt), Europe (Ireland), and Europe (Stockholm). For the G.8X worker type, each worker maps to 8 DPU (32 vCPUs, 128 GB of memory) with 512GB disk (approximately 487GB free), and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs, in the same Amazon Web Services Regions as supported for the G.4X worker type. For the G.025X worker type, each worker maps to 0.25 DPU (2 vCPUs, 4 GB of memory) with 84GB disk (approximately 34GB free), and provides 1 executor per worker. We recommend this worker type for low volume streaming jobs. This worker type is only available for Glue version 3.0 streaming jobs. 
For the Z.2X worker type, each worker maps to 2 M-DPU (8vCPUs, 64 GB of memory) with 128 GB disk (approximately 120GB free), and provides up to 8 Ray workers based on the autoscaler. + /// The type of predefined worker that is allocated when a job runs. Accepts a value of G.1X, G.2X, G.4X, G.8X or G.025X for Spark jobs. Accepts the value Z.2X for Ray jobs. For the G.1X worker type, each worker maps to 1 DPU (4 vCPUs, 16 GB of memory) with 94GB disk, and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offer a scalable and cost effective way to run most jobs. For the G.2X worker type, each worker maps to 2 DPU (8 vCPUs, 32 GB of memory) with 138GB disk, and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offer a scalable and cost effective way to run most jobs. For the G.4X worker type, each worker maps to 4 DPU (16 vCPUs, 64 GB of memory) with 256GB disk, and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs in the following Amazon Web Services Regions: US East (Ohio), US East (N. Virginia), US West (Oregon), Asia Pacific (Singapore), Asia Pacific (Sydney), Asia Pacific (Tokyo), Canada (Central), Europe (Frankfurt), Europe (Ireland), and Europe (Stockholm). For the G.8X worker type, each worker maps to 8 DPU (32 vCPUs, 128 GB of memory) with 512GB disk, and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs, in the same Amazon Web Services Regions as supported for the G.4X worker type. 
For the G.025X worker type, each worker maps to 0.25 DPU (2 vCPUs, 4 GB of memory) with 84GB disk, and provides 1 executor per worker. We recommend this worker type for low volume streaming jobs. This worker type is only available for Glue version 3.0 or later streaming jobs. For the Z.2X worker type, each worker maps to 2 M-DPU (8vCPUs, 64 GB of memory) with 128 GB disk, and provides up to 8 Ray workers based on the autoscaler. public let workerType: WorkerType? @inlinable @@ -20173,7 +20206,7 @@ extension Glue { public let name: String /// The ID of the workflow run for which the run properties should be updated. public let runId: String - /// The properties to put for the specified run. + /// The properties to put for the specified run. Run properties may be logged. Do not pass plaintext secrets as properties. Retrieve secrets from a Glue Connection, Amazon Web Services Secrets Manager or other secret management mechanism if you intend to use them within the workflow run. public let runProperties: [String: String] @inlinable @@ -23579,7 +23612,7 @@ extension Glue { public let securityConfiguration: String? /// The JobRun timeout in minutes. This is the maximum time that a job run can consume resources before it is terminated and enters TIMEOUT status. This value overrides the timeout value set in the parent job. Streaming jobs must have timeout values less than 7 days or 10080 minutes. When the value is left blank, the job will be restarted after 7 days based if you have not setup a maintenance window. If you have setup maintenance window, it will be restarted during the maintenance window after 7 days. public let timeout: Int? - /// The type of predefined worker that is allocated when a job runs. Accepts a value of G.1X, G.2X, G.4X, G.8X or G.025X for Spark jobs. Accepts the value Z.2X for Ray jobs. For the G.1X worker type, each worker maps to 1 DPU (4 vCPUs, 16 GB of memory) with 84GB disk (approximately 34GB free), and provides 1 executor per worker. 
We recommend this worker type for workloads such as data transforms, joins, and queries, to offers a scalable and cost effective way to run most jobs. For the G.2X worker type, each worker maps to 2 DPU (8 vCPUs, 32 GB of memory) with 128GB disk (approximately 77GB free), and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offers a scalable and cost effective way to run most jobs. For the G.4X worker type, each worker maps to 4 DPU (16 vCPUs, 64 GB of memory) with 256GB disk (approximately 235GB free), and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs in the following Amazon Web Services Regions: US East (Ohio), US East (N. Virginia), US West (Oregon), Asia Pacific (Singapore), Asia Pacific (Sydney), Asia Pacific (Tokyo), Canada (Central), Europe (Frankfurt), Europe (Ireland), and Europe (Stockholm). For the G.8X worker type, each worker maps to 8 DPU (32 vCPUs, 128 GB of memory) with 512GB disk (approximately 487GB free), and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs, in the same Amazon Web Services Regions as supported for the G.4X worker type. For the G.025X worker type, each worker maps to 0.25 DPU (2 vCPUs, 4 GB of memory) with 84GB disk (approximately 34GB free), and provides 1 executor per worker. We recommend this worker type for low volume streaming jobs. This worker type is only available for Glue version 3.0 streaming jobs. 
For the Z.2X worker type, each worker maps to 2 M-DPU (8vCPUs, 64 GB of memory) with 128 GB disk (approximately 120GB free), and provides up to 8 Ray workers based on the autoscaler. + /// The type of predefined worker that is allocated when a job runs. Accepts a value of G.1X, G.2X, G.4X, G.8X or G.025X for Spark jobs. Accepts the value Z.2X for Ray jobs. For the G.1X worker type, each worker maps to 1 DPU (4 vCPUs, 16 GB of memory) with 94GB disk, and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offer a scalable and cost effective way to run most jobs. For the G.2X worker type, each worker maps to 2 DPU (8 vCPUs, 32 GB of memory) with 138GB disk, and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offer a scalable and cost effective way to run most jobs. For the G.4X worker type, each worker maps to 4 DPU (16 vCPUs, 64 GB of memory) with 256GB disk, and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs in the following Amazon Web Services Regions: US East (Ohio), US East (N. Virginia), US West (Oregon), Asia Pacific (Singapore), Asia Pacific (Sydney), Asia Pacific (Tokyo), Canada (Central), Europe (Frankfurt), Europe (Ireland), and Europe (Stockholm). For the G.8X worker type, each worker maps to 8 DPU (32 vCPUs, 128 GB of memory) with 512GB disk, and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs, in the same Amazon Web Services Regions as supported for the G.4X worker type. 
For the G.025X worker type, each worker maps to 0.25 DPU (2 vCPUs, 4 GB of memory) with 84GB disk, and provides 1 executor per worker. We recommend this worker type for low volume streaming jobs. This worker type is only available for Glue version 3.0 or later streaming jobs. For the Z.2X worker type, each worker maps to 2 M-DPU (8vCPUs, 64 GB of memory) with 128 GB disk, and provides up to 8 Ray workers based on the autoscaler. public let workerType: WorkerType? @inlinable @@ -23768,7 +23801,7 @@ extension Glue { public struct StartWorkflowRunRequest: AWSEncodableShape { /// The name of the workflow to start. public let name: String - /// The workflow run properties for the new workflow run. + /// The workflow run properties for the new workflow run. Run properties may be logged. Do not pass plaintext secrets as properties. Retrieve secrets from a Glue Connection, Amazon Web Services Secrets Manager or other secret management mechanism if you intend to use them within the workflow run. public let runProperties: [String: String]? @inlinable @@ -27118,7 +27151,7 @@ extension Glue { } public struct UpdateWorkflowRequest: AWSEncodableShape { - /// A collection of properties to be used as part of each execution of the workflow. + /// A collection of properties to be used as part of each execution of the workflow. Run properties may be logged. Do not pass plaintext secrets as properties. Retrieve secrets from a Glue Connection, Amazon Web Services Secrets Manager or other secret management mechanism if you intend to use them within the workflow run. public let defaultRunProperties: [String: String]? /// The description of the workflow. public let description: String? 
diff --git a/Sources/Soto/Services/GreengrassV2/GreengrassV2_api.swift b/Sources/Soto/Services/GreengrassV2/GreengrassV2_api.swift index 5c81811cfc..6143d53332 100644 --- a/Sources/Soto/Services/GreengrassV2/GreengrassV2_api.swift +++ b/Sources/Soto/Services/GreengrassV2/GreengrassV2_api.swift @@ -737,7 +737,7 @@ public struct GreengrassV2: AWSService { return try await self.listComponents(input, logger: logger) } - /// Retrieves a paginated list of Greengrass core devices. IoT Greengrass relies on individual devices to send status updates to the Amazon Web Services Cloud. If the IoT Greengrass Core software isn't running on the device, or if device isn't connected to the Amazon Web Services Cloud, then the reported status of that device might not reflect its current status. The status timestamp indicates when the device status was last updated. Core devices send status updates at the following times: When the IoT Greengrass Core software starts When the core device receives a deployment from the Amazon Web Services Cloud When the status of any component on the core device becomes BROKEN At a regular interval that you can configure, which defaults to 24 hours For IoT Greengrass Core v2.7.0, the core device sends status updates upon local deployment and cloud deployment + /// Retrieves a paginated list of Greengrass core devices. IoT Greengrass relies on individual devices to send status updates to the Amazon Web Services Cloud. If the IoT Greengrass Core software isn't running on the device, or if device isn't connected to the Amazon Web Services Cloud, then the reported status of that device might not reflect its current status. The status timestamp indicates when the device status was last updated. 
Core devices send status updates at the following times: When the IoT Greengrass Core software starts When the core device receives a deployment from the Amazon Web Services Cloud For Greengrass nucleus 2.12.2 and earlier, the core device sends status updates when the status of any component on the core device becomes ERRORED or BROKEN. For Greengrass nucleus 2.12.3 and later, the core device sends status updates when the status of any component on the core device becomes ERRORED, BROKEN, RUNNING, or FINISHED. At a regular interval that you can configure, which defaults to 24 hours For IoT Greengrass Core v2.7.0, the core device sends status updates upon local deployment and cloud deployment @Sendable @inlinable public func listCoreDevices(_ input: ListCoreDevicesRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListCoreDevicesResponse { @@ -750,11 +750,12 @@ public struct GreengrassV2: AWSService { logger: logger ) } - /// Retrieves a paginated list of Greengrass core devices. IoT Greengrass relies on individual devices to send status updates to the Amazon Web Services Cloud. If the IoT Greengrass Core software isn't running on the device, or if device isn't connected to the Amazon Web Services Cloud, then the reported status of that device might not reflect its current status. The status timestamp indicates when the device status was last updated. Core devices send status updates at the following times: When the IoT Greengrass Core software starts When the core device receives a deployment from the Amazon Web Services Cloud When the status of any component on the core device becomes BROKEN At a regular interval that you can configure, which defaults to 24 hours For IoT Greengrass Core v2.7.0, the core device sends status updates upon local deployment and cloud deployment + /// Retrieves a paginated list of Greengrass core devices. IoT Greengrass relies on individual devices to send status updates to the Amazon Web Services Cloud. 
If the IoT Greengrass Core software isn't running on the device, or if device isn't connected to the Amazon Web Services Cloud, then the reported status of that device might not reflect its current status. The status timestamp indicates when the device status was last updated. Core devices send status updates at the following times: When the IoT Greengrass Core software starts When the core device receives a deployment from the Amazon Web Services Cloud For Greengrass nucleus 2.12.2 and earlier, the core device sends status updates when the status of any component on the core device becomes ERRORED or BROKEN. For Greengrass nucleus 2.12.3 and later, the core device sends status updates when the status of any component on the core device becomes ERRORED, BROKEN, RUNNING, or FINISHED. At a regular interval that you can configure, which defaults to 24 hours For IoT Greengrass Core v2.7.0, the core device sends status updates upon local deployment and cloud deployment /// /// Parameters: /// - maxResults: The maximum number of results to be returned per paginated request. /// - nextToken: The token to be used for the next set of paginated results. + /// - runtime: The runtime to be used by the core device. The runtime can be: aws_nucleus_classic aws_nucleus_lite /// - status: The core device status by which to filter. If you specify this parameter, the list includes only core devices that have this status. Choose one of the following options: HEALTHY – The IoT Greengrass Core software and all components run on the core device without issue. UNHEALTHY – The IoT Greengrass Core software or a component is in a failed state on the core device. /// - thingGroupArn: The ARN of the IoT thing group by which to filter. If you specify this parameter, the list includes only core devices that have successfully deployed a deployment that targets the thing group. When you remove a core device from a thing group, the list continues to include that core device. 
/// - logger: Logger use during operation @@ -762,6 +763,7 @@ public struct GreengrassV2: AWSService { public func listCoreDevices( maxResults: Int? = nil, nextToken: String? = nil, + runtime: String? = nil, status: CoreDeviceStatus? = nil, thingGroupArn: String? = nil, logger: Logger = AWSClient.loggingDisabled @@ -769,6 +771,7 @@ public struct GreengrassV2: AWSService { let input = ListCoreDevicesRequest( maxResults: maxResults, nextToken: nextToken, + runtime: runtime, status: status, thingGroupArn: thingGroupArn ) @@ -1193,18 +1196,21 @@ extension GreengrassV2 { /// /// - Parameters: /// - maxResults: The maximum number of results to be returned per paginated request. + /// - runtime: The runtime to be used by the core device. The runtime can be: aws_nucleus_classic aws_nucleus_lite /// - status: The core device status by which to filter. If you specify this parameter, the list includes only core devices that have this status. Choose one of the following options: HEALTHY – The IoT Greengrass Core software and all components run on the core device without issue. UNHEALTHY – The IoT Greengrass Core software or a component is in a failed state on the core device. /// - thingGroupArn: The ARN of the IoT thing group by which to filter. If you specify this parameter, the list includes only core devices that have successfully deployed a deployment that targets the thing group. When you remove a core device from a thing group, the list continues to include that core device. /// - logger: Logger used for logging @inlinable public func listCoreDevicesPaginator( maxResults: Int? = nil, + runtime: String? = nil, status: CoreDeviceStatus? = nil, thingGroupArn: String? 
= nil, logger: Logger = AWSClient.loggingDisabled ) -> AWSClient.PaginatorSequence { let input = ListCoreDevicesRequest( maxResults: maxResults, + runtime: runtime, status: status, thingGroupArn: thingGroupArn ) @@ -1371,6 +1377,7 @@ extension GreengrassV2.ListCoreDevicesRequest: AWSPaginateToken { return .init( maxResults: self.maxResults, nextToken: token, + runtime: self.runtime, status: self.status, thingGroupArn: self.thingGroupArn ) diff --git a/Sources/Soto/Services/GreengrassV2/GreengrassV2_shapes.swift b/Sources/Soto/Services/GreengrassV2/GreengrassV2_shapes.swift index 35e73119f5..33cdd25578 100644 --- a/Sources/Soto/Services/GreengrassV2/GreengrassV2_shapes.swift +++ b/Sources/Soto/Services/GreengrassV2/GreengrassV2_shapes.swift @@ -697,23 +697,35 @@ extension GreengrassV2 { } public struct CoreDevice: AWSDecodableShape { + /// The computer architecture of the core device. + public let architecture: String? /// The name of the core device. This is also the name of the IoT thing. public let coreDeviceThingName: String? /// The time at which the core device's status last updated, expressed in ISO 8601 format. public let lastStatusUpdateTimestamp: Date? + /// The operating system platform that the core device runs. + public let platform: String? + /// The runtime for the core device. The runtime can be: aws_nucleus_classic aws_nucleus_lite + public let runtime: String? /// The status of the core device. Core devices can have the following statuses: HEALTHY – The IoT Greengrass Core software and all components run on the core device without issue. UNHEALTHY – The IoT Greengrass Core software or a component is in a failed state on the core device. public let status: CoreDeviceStatus? @inlinable - public init(coreDeviceThingName: String? = nil, lastStatusUpdateTimestamp: Date? = nil, status: CoreDeviceStatus? = nil) { + public init(architecture: String? = nil, coreDeviceThingName: String? = nil, lastStatusUpdateTimestamp: Date? = nil, platform: String? 
= nil, runtime: String? = nil, status: CoreDeviceStatus? = nil) { + self.architecture = architecture self.coreDeviceThingName = coreDeviceThingName self.lastStatusUpdateTimestamp = lastStatusUpdateTimestamp + self.platform = platform + self.runtime = runtime self.status = status } private enum CodingKeys: String, CodingKey { + case architecture = "architecture" case coreDeviceThingName = "coreDeviceThingName" case lastStatusUpdateTimestamp = "lastStatusUpdateTimestamp" + case platform = "platform" + case runtime = "runtime" case status = "status" } } @@ -1435,18 +1447,21 @@ extension GreengrassV2 { public let lastStatusUpdateTimestamp: Date? /// The operating system platform that the core device runs. public let platform: String? + /// The runtime for the core device. The runtime can be: aws_nucleus_classic aws_nucleus_lite + public let runtime: String? /// The status of the core device. The core device status can be: HEALTHY – The IoT Greengrass Core software and all components run on the core device without issue. UNHEALTHY – The IoT Greengrass Core software or a component is in a failed state on the core device. public let status: CoreDeviceStatus? /// A list of key-value pairs that contain metadata for the resource. For more information, see Tag your resources in the IoT Greengrass V2 Developer Guide. public let tags: [String: String]? @inlinable - public init(architecture: String? = nil, coreDeviceThingName: String? = nil, coreVersion: String? = nil, lastStatusUpdateTimestamp: Date? = nil, platform: String? = nil, status: CoreDeviceStatus? = nil, tags: [String: String]? = nil) { + public init(architecture: String? = nil, coreDeviceThingName: String? = nil, coreVersion: String? = nil, lastStatusUpdateTimestamp: Date? = nil, platform: String? = nil, runtime: String? = nil, status: CoreDeviceStatus? = nil, tags: [String: String]? 
= nil) { self.architecture = architecture self.coreDeviceThingName = coreDeviceThingName self.coreVersion = coreVersion self.lastStatusUpdateTimestamp = lastStatusUpdateTimestamp self.platform = platform + self.runtime = runtime self.status = status self.tags = tags } @@ -1457,6 +1472,7 @@ extension GreengrassV2 { case coreVersion = "coreVersion" case lastStatusUpdateTimestamp = "lastStatusUpdateTimestamp" case platform = "platform" + case runtime = "runtime" case status = "status" case tags = "tags" } @@ -2136,15 +2152,18 @@ extension GreengrassV2 { public let maxResults: Int? /// The token to be used for the next set of paginated results. public let nextToken: String? + /// The runtime to be used by the core device. The runtime can be: aws_nucleus_classic aws_nucleus_lite + public let runtime: String? /// The core device status by which to filter. If you specify this parameter, the list includes only core devices that have this status. Choose one of the following options: HEALTHY – The IoT Greengrass Core software and all components run on the core device without issue. UNHEALTHY – The IoT Greengrass Core software or a component is in a failed state on the core device. public let status: CoreDeviceStatus? /// The ARN of the IoT thing group by which to filter. If you specify this parameter, the list includes only core devices that have successfully deployed a deployment that targets the thing group. When you remove a core device from a thing group, the list continues to include that core device. public let thingGroupArn: String? @inlinable - public init(maxResults: Int? = nil, nextToken: String? = nil, status: CoreDeviceStatus? = nil, thingGroupArn: String? = nil) { + public init(maxResults: Int? = nil, nextToken: String? = nil, runtime: String? = nil, status: CoreDeviceStatus? = nil, thingGroupArn: String? 
= nil) { self.maxResults = maxResults self.nextToken = nextToken + self.runtime = runtime self.status = status self.thingGroupArn = thingGroupArn } @@ -2154,6 +2173,7 @@ extension GreengrassV2 { _ = encoder.container(keyedBy: CodingKeys.self) request.encodeQuery(self.maxResults, key: "maxResults") request.encodeQuery(self.nextToken, key: "nextToken") + request.encodeQuery(self.runtime, key: "runtime") request.encodeQuery(self.status, key: "status") request.encodeQuery(self.thingGroupArn, key: "thingGroupArn") } @@ -2161,6 +2181,8 @@ extension GreengrassV2 { public func validate(name: String) throws { try self.validate(self.maxResults, name: "maxResults", parent: name, max: 100) try self.validate(self.maxResults, name: "maxResults", parent: name, min: 1) + try self.validate(self.runtime, name: "runtime", parent: name, max: 255) + try self.validate(self.runtime, name: "runtime", parent: name, min: 1) try self.validate(self.thingGroupArn, name: "thingGroupArn", parent: name, pattern: "^arn:[^:]*:iot:[^:]*:[0-9]+:thinggroup/.+$") } diff --git a/Sources/Soto/Services/GuardDuty/GuardDuty_api.swift b/Sources/Soto/Services/GuardDuty/GuardDuty_api.swift index e445ea930c..44c789487c 100644 --- a/Sources/Soto/Services/GuardDuty/GuardDuty_api.swift +++ b/Sources/Soto/Services/GuardDuty/GuardDuty_api.swift @@ -256,7 +256,7 @@ public struct GuardDuty: AWSService { /// - clientToken: The idempotency token for the create request. /// - description: The description of the filter. Valid characters include alphanumeric characters, and special characters such as hyphen, period, colon, underscore, parentheses ({ }, [ ], and ( )), forward slash, horizontal tab, vertical tab, newline, form feed, return, and whitespace. /// - detectorId: The detector ID associated with the GuardDuty account for which you want to create a filter. To find the detectorId in the current Region, see the - /// - findingCriteria: Represents the criteria to be used in the filter for querying findings. 
You can only use the following attributes to query findings: accountId id region severity To filter on the basis of severity, the API and CLI use the following input list for the FindingCriteria condition: Low: ["1", "2", "3"] Medium: ["4", "5", "6"] High: ["7", "8", "9"] For more information, see Severity levels for GuardDuty findings. type updatedAt Type: ISO 8601 string format: YYYY-MM-DDTHH:MM:SS.SSSZ or YYYY-MM-DDTHH:MM:SSZ depending on whether the value contains milliseconds. resource.accessKeyDetails.accessKeyId resource.accessKeyDetails.principalId resource.accessKeyDetails.userName resource.accessKeyDetails.userType resource.instanceDetails.iamInstanceProfile.id resource.instanceDetails.imageId resource.instanceDetails.instanceId resource.instanceDetails.tags.key resource.instanceDetails.tags.value resource.instanceDetails.networkInterfaces.ipv6Addresses resource.instanceDetails.networkInterfaces.privateIpAddresses.privateIpAddress resource.instanceDetails.networkInterfaces.publicDnsName resource.instanceDetails.networkInterfaces.publicIp resource.instanceDetails.networkInterfaces.securityGroups.groupId resource.instanceDetails.networkInterfaces.securityGroups.groupName resource.instanceDetails.networkInterfaces.subnetId resource.instanceDetails.networkInterfaces.vpcId resource.instanceDetails.outpostArn resource.resourceType resource.s3BucketDetails.publicAccess.effectivePermissions resource.s3BucketDetails.name resource.s3BucketDetails.tags.key resource.s3BucketDetails.tags.value resource.s3BucketDetails.type service.action.actionType service.action.awsApiCallAction.api service.action.awsApiCallAction.callerType service.action.awsApiCallAction.errorCode service.action.awsApiCallAction.remoteIpDetails.city.cityName service.action.awsApiCallAction.remoteIpDetails.country.countryName service.action.awsApiCallAction.remoteIpDetails.ipAddressV4 service.action.awsApiCallAction.remoteIpDetails.ipAddressV6 
service.action.awsApiCallAction.remoteIpDetails.organization.asn service.action.awsApiCallAction.remoteIpDetails.organization.asnOrg service.action.awsApiCallAction.serviceName service.action.dnsRequestAction.domain service.action.dnsRequestAction.domainWithSuffix service.action.networkConnectionAction.blocked service.action.networkConnectionAction.connectionDirection service.action.networkConnectionAction.localPortDetails.port service.action.networkConnectionAction.protocol service.action.networkConnectionAction.remoteIpDetails.city.cityName service.action.networkConnectionAction.remoteIpDetails.country.countryName service.action.networkConnectionAction.remoteIpDetails.ipAddressV4 service.action.networkConnectionAction.remoteIpDetails.ipAddressV6 service.action.networkConnectionAction.remoteIpDetails.organization.asn service.action.networkConnectionAction.remoteIpDetails.organization.asnOrg service.action.networkConnectionAction.remotePortDetails.port service.action.awsApiCallAction.remoteAccountDetails.affiliated service.action.kubernetesApiCallAction.remoteIpDetails.ipAddressV4 service.action.kubernetesApiCallAction.remoteIpDetails.ipAddressV6 service.action.kubernetesApiCallAction.namespace service.action.kubernetesApiCallAction.remoteIpDetails.organization.asn service.action.kubernetesApiCallAction.requestUri service.action.kubernetesApiCallAction.statusCode service.action.networkConnectionAction.localIpDetails.ipAddressV4 service.action.networkConnectionAction.localIpDetails.ipAddressV6 service.action.networkConnectionAction.protocol service.action.awsApiCallAction.serviceName service.action.awsApiCallAction.remoteAccountDetails.accountId service.additionalInfo.threatListName service.resourceRole resource.eksClusterDetails.name resource.kubernetesDetails.kubernetesWorkloadDetails.name resource.kubernetesDetails.kubernetesWorkloadDetails.namespace resource.kubernetesDetails.kubernetesUserDetails.username 
resource.kubernetesDetails.kubernetesWorkloadDetails.containers.image resource.kubernetesDetails.kubernetesWorkloadDetails.containers.imagePrefix service.ebsVolumeScanDetails.scanId service.ebsVolumeScanDetails.scanDetections.threatDetectedByName.threatNames.name service.ebsVolumeScanDetails.scanDetections.threatDetectedByName.threatNames.severity service.ebsVolumeScanDetails.scanDetections.threatDetectedByName.threatNames.filePaths.hash resource.ecsClusterDetails.name resource.ecsClusterDetails.taskDetails.containers.image resource.ecsClusterDetails.taskDetails.definitionArn resource.containerDetails.image resource.rdsDbInstanceDetails.dbInstanceIdentifier resource.rdsDbInstanceDetails.dbClusterIdentifier resource.rdsDbInstanceDetails.engine resource.rdsDbUserDetails.user resource.rdsDbInstanceDetails.tags.key resource.rdsDbInstanceDetails.tags.value service.runtimeDetails.process.executableSha256 service.runtimeDetails.process.name service.runtimeDetails.process.name resource.lambdaDetails.functionName resource.lambdaDetails.functionArn resource.lambdaDetails.tags.key resource.lambdaDetails.tags.value + /// - findingCriteria: Represents the criteria to be used in the filter for querying findings. You can only use the following attributes to query findings: accountId id region severity To filter on the basis of severity, the API and CLI use the following input list for the FindingCriteria condition: Low: ["1", "2", "3"] Medium: ["4", "5", "6"] High: ["7", "8"] Critical: ["9", "10"] For more information, see Findings severity levels in the Amazon GuardDuty User Guide. type updatedAt Type: ISO 8601 string format: YYYY-MM-DDTHH:MM:SS.SSSZ or YYYY-MM-DDTHH:MM:SSZ depending on whether the value contains milliseconds. 
resource.accessKeyDetails.accessKeyId resource.accessKeyDetails.principalId resource.accessKeyDetails.userName resource.accessKeyDetails.userType resource.instanceDetails.iamInstanceProfile.id resource.instanceDetails.imageId resource.instanceDetails.instanceId resource.instanceDetails.tags.key resource.instanceDetails.tags.value resource.instanceDetails.networkInterfaces.ipv6Addresses resource.instanceDetails.networkInterfaces.privateIpAddresses.privateIpAddress resource.instanceDetails.networkInterfaces.publicDnsName resource.instanceDetails.networkInterfaces.publicIp resource.instanceDetails.networkInterfaces.securityGroups.groupId resource.instanceDetails.networkInterfaces.securityGroups.groupName resource.instanceDetails.networkInterfaces.subnetId resource.instanceDetails.networkInterfaces.vpcId resource.instanceDetails.outpostArn resource.resourceType resource.s3BucketDetails.publicAccess.effectivePermissions resource.s3BucketDetails.name resource.s3BucketDetails.tags.key resource.s3BucketDetails.tags.value resource.s3BucketDetails.type service.action.actionType service.action.awsApiCallAction.api service.action.awsApiCallAction.callerType service.action.awsApiCallAction.errorCode service.action.awsApiCallAction.remoteIpDetails.city.cityName service.action.awsApiCallAction.remoteIpDetails.country.countryName service.action.awsApiCallAction.remoteIpDetails.ipAddressV4 service.action.awsApiCallAction.remoteIpDetails.ipAddressV6 service.action.awsApiCallAction.remoteIpDetails.organization.asn service.action.awsApiCallAction.remoteIpDetails.organization.asnOrg service.action.awsApiCallAction.serviceName service.action.dnsRequestAction.domain service.action.dnsRequestAction.domainWithSuffix service.action.networkConnectionAction.blocked service.action.networkConnectionAction.connectionDirection service.action.networkConnectionAction.localPortDetails.port service.action.networkConnectionAction.protocol 
service.action.networkConnectionAction.remoteIpDetails.city.cityName service.action.networkConnectionAction.remoteIpDetails.country.countryName service.action.networkConnectionAction.remoteIpDetails.ipAddressV4 service.action.networkConnectionAction.remoteIpDetails.ipAddressV6 service.action.networkConnectionAction.remoteIpDetails.organization.asn service.action.networkConnectionAction.remoteIpDetails.organization.asnOrg service.action.networkConnectionAction.remotePortDetails.port service.action.awsApiCallAction.remoteAccountDetails.affiliated service.action.kubernetesApiCallAction.remoteIpDetails.ipAddressV4 service.action.kubernetesApiCallAction.remoteIpDetails.ipAddressV6 service.action.kubernetesApiCallAction.namespace service.action.kubernetesApiCallAction.remoteIpDetails.organization.asn service.action.kubernetesApiCallAction.requestUri service.action.kubernetesApiCallAction.statusCode service.action.networkConnectionAction.localIpDetails.ipAddressV4 service.action.networkConnectionAction.localIpDetails.ipAddressV6 service.action.networkConnectionAction.protocol service.action.awsApiCallAction.serviceName service.action.awsApiCallAction.remoteAccountDetails.accountId service.additionalInfo.threatListName service.resourceRole resource.eksClusterDetails.name resource.kubernetesDetails.kubernetesWorkloadDetails.name resource.kubernetesDetails.kubernetesWorkloadDetails.namespace resource.kubernetesDetails.kubernetesUserDetails.username resource.kubernetesDetails.kubernetesWorkloadDetails.containers.image resource.kubernetesDetails.kubernetesWorkloadDetails.containers.imagePrefix service.ebsVolumeScanDetails.scanId service.ebsVolumeScanDetails.scanDetections.threatDetectedByName.threatNames.name service.ebsVolumeScanDetails.scanDetections.threatDetectedByName.threatNames.severity service.ebsVolumeScanDetails.scanDetections.threatDetectedByName.threatNames.filePaths.hash resource.ecsClusterDetails.name resource.ecsClusterDetails.taskDetails.containers.image 
resource.ecsClusterDetails.taskDetails.definitionArn resource.containerDetails.image resource.rdsDbInstanceDetails.dbInstanceIdentifier resource.rdsDbInstanceDetails.dbClusterIdentifier resource.rdsDbInstanceDetails.engine resource.rdsDbUserDetails.user resource.rdsDbInstanceDetails.tags.key resource.rdsDbInstanceDetails.tags.value service.runtimeDetails.process.executableSha256 service.runtimeDetails.process.name service.runtimeDetails.process.name resource.lambdaDetails.functionName resource.lambdaDetails.functionArn resource.lambdaDetails.tags.key resource.lambdaDetails.tags.value /// - name: The name of the filter. Valid characters include period (.), underscore (_), dash (-), and alphanumeric characters. A whitespace is considered to be an invalid character. /// - rank: Specifies the position of the filter in the list of current filters. Also specifies the order in which this filter is applied to the findings. /// - tags: The tags to be added to a new filter resource. diff --git a/Sources/Soto/Services/GuardDuty/GuardDuty_shapes.swift b/Sources/Soto/Services/GuardDuty/GuardDuty_shapes.swift index a9796cf28f..0991fe5e16 100644 --- a/Sources/Soto/Services/GuardDuty/GuardDuty_shapes.swift +++ b/Sources/Soto/Services/GuardDuty/GuardDuty_shapes.swift @@ -1565,7 +1565,7 @@ extension GuardDuty { /// The detector ID associated with the GuardDuty account for which you want to create a filter. To find the detectorId in the current Region, see the /// Settings page in the GuardDuty console, or run the ListDetectors API. public let detectorId: String - /// Represents the criteria to be used in the filter for querying findings. You can only use the following attributes to query findings: accountId id region severity To filter on the basis of severity, the API and CLI use the following input list for the FindingCriteria condition: Low: ["1", "2", "3"] Medium: ["4", "5", "6"] High: ["7", "8", "9"] For more information, see Severity levels for GuardDuty findings. 
type updatedAt Type: ISO 8601 string format: YYYY-MM-DDTHH:MM:SS.SSSZ or YYYY-MM-DDTHH:MM:SSZ depending on whether the value contains milliseconds. resource.accessKeyDetails.accessKeyId resource.accessKeyDetails.principalId resource.accessKeyDetails.userName resource.accessKeyDetails.userType resource.instanceDetails.iamInstanceProfile.id resource.instanceDetails.imageId resource.instanceDetails.instanceId resource.instanceDetails.tags.key resource.instanceDetails.tags.value resource.instanceDetails.networkInterfaces.ipv6Addresses resource.instanceDetails.networkInterfaces.privateIpAddresses.privateIpAddress resource.instanceDetails.networkInterfaces.publicDnsName resource.instanceDetails.networkInterfaces.publicIp resource.instanceDetails.networkInterfaces.securityGroups.groupId resource.instanceDetails.networkInterfaces.securityGroups.groupName resource.instanceDetails.networkInterfaces.subnetId resource.instanceDetails.networkInterfaces.vpcId resource.instanceDetails.outpostArn resource.resourceType resource.s3BucketDetails.publicAccess.effectivePermissions resource.s3BucketDetails.name resource.s3BucketDetails.tags.key resource.s3BucketDetails.tags.value resource.s3BucketDetails.type service.action.actionType service.action.awsApiCallAction.api service.action.awsApiCallAction.callerType service.action.awsApiCallAction.errorCode service.action.awsApiCallAction.remoteIpDetails.city.cityName service.action.awsApiCallAction.remoteIpDetails.country.countryName service.action.awsApiCallAction.remoteIpDetails.ipAddressV4 service.action.awsApiCallAction.remoteIpDetails.ipAddressV6 service.action.awsApiCallAction.remoteIpDetails.organization.asn service.action.awsApiCallAction.remoteIpDetails.organization.asnOrg service.action.awsApiCallAction.serviceName service.action.dnsRequestAction.domain service.action.dnsRequestAction.domainWithSuffix service.action.networkConnectionAction.blocked service.action.networkConnectionAction.connectionDirection 
service.action.networkConnectionAction.localPortDetails.port service.action.networkConnectionAction.protocol service.action.networkConnectionAction.remoteIpDetails.city.cityName service.action.networkConnectionAction.remoteIpDetails.country.countryName service.action.networkConnectionAction.remoteIpDetails.ipAddressV4 service.action.networkConnectionAction.remoteIpDetails.ipAddressV6 service.action.networkConnectionAction.remoteIpDetails.organization.asn service.action.networkConnectionAction.remoteIpDetails.organization.asnOrg service.action.networkConnectionAction.remotePortDetails.port service.action.awsApiCallAction.remoteAccountDetails.affiliated service.action.kubernetesApiCallAction.remoteIpDetails.ipAddressV4 service.action.kubernetesApiCallAction.remoteIpDetails.ipAddressV6 service.action.kubernetesApiCallAction.namespace service.action.kubernetesApiCallAction.remoteIpDetails.organization.asn service.action.kubernetesApiCallAction.requestUri service.action.kubernetesApiCallAction.statusCode service.action.networkConnectionAction.localIpDetails.ipAddressV4 service.action.networkConnectionAction.localIpDetails.ipAddressV6 service.action.networkConnectionAction.protocol service.action.awsApiCallAction.serviceName service.action.awsApiCallAction.remoteAccountDetails.accountId service.additionalInfo.threatListName service.resourceRole resource.eksClusterDetails.name resource.kubernetesDetails.kubernetesWorkloadDetails.name resource.kubernetesDetails.kubernetesWorkloadDetails.namespace resource.kubernetesDetails.kubernetesUserDetails.username resource.kubernetesDetails.kubernetesWorkloadDetails.containers.image resource.kubernetesDetails.kubernetesWorkloadDetails.containers.imagePrefix service.ebsVolumeScanDetails.scanId service.ebsVolumeScanDetails.scanDetections.threatDetectedByName.threatNames.name service.ebsVolumeScanDetails.scanDetections.threatDetectedByName.threatNames.severity 
service.ebsVolumeScanDetails.scanDetections.threatDetectedByName.threatNames.filePaths.hash resource.ecsClusterDetails.name resource.ecsClusterDetails.taskDetails.containers.image resource.ecsClusterDetails.taskDetails.definitionArn resource.containerDetails.image resource.rdsDbInstanceDetails.dbInstanceIdentifier resource.rdsDbInstanceDetails.dbClusterIdentifier resource.rdsDbInstanceDetails.engine resource.rdsDbUserDetails.user resource.rdsDbInstanceDetails.tags.key resource.rdsDbInstanceDetails.tags.value service.runtimeDetails.process.executableSha256 service.runtimeDetails.process.name service.runtimeDetails.process.name resource.lambdaDetails.functionName resource.lambdaDetails.functionArn resource.lambdaDetails.tags.key resource.lambdaDetails.tags.value + /// Represents the criteria to be used in the filter for querying findings. You can only use the following attributes to query findings: accountId id region severity To filter on the basis of severity, the API and CLI use the following input list for the FindingCriteria condition: Low: ["1", "2", "3"] Medium: ["4", "5", "6"] High: ["7", "8"] Critical: ["9", "10"] For more information, see Findings severity levels in the Amazon GuardDuty User Guide. type updatedAt Type: ISO 8601 string format: YYYY-MM-DDTHH:MM:SS.SSSZ or YYYY-MM-DDTHH:MM:SSZ depending on whether the value contains milliseconds. 
resource.accessKeyDetails.accessKeyId resource.accessKeyDetails.principalId resource.accessKeyDetails.userName resource.accessKeyDetails.userType resource.instanceDetails.iamInstanceProfile.id resource.instanceDetails.imageId resource.instanceDetails.instanceId resource.instanceDetails.tags.key resource.instanceDetails.tags.value resource.instanceDetails.networkInterfaces.ipv6Addresses resource.instanceDetails.networkInterfaces.privateIpAddresses.privateIpAddress resource.instanceDetails.networkInterfaces.publicDnsName resource.instanceDetails.networkInterfaces.publicIp resource.instanceDetails.networkInterfaces.securityGroups.groupId resource.instanceDetails.networkInterfaces.securityGroups.groupName resource.instanceDetails.networkInterfaces.subnetId resource.instanceDetails.networkInterfaces.vpcId resource.instanceDetails.outpostArn resource.resourceType resource.s3BucketDetails.publicAccess.effectivePermissions resource.s3BucketDetails.name resource.s3BucketDetails.tags.key resource.s3BucketDetails.tags.value resource.s3BucketDetails.type service.action.actionType service.action.awsApiCallAction.api service.action.awsApiCallAction.callerType service.action.awsApiCallAction.errorCode service.action.awsApiCallAction.remoteIpDetails.city.cityName service.action.awsApiCallAction.remoteIpDetails.country.countryName service.action.awsApiCallAction.remoteIpDetails.ipAddressV4 service.action.awsApiCallAction.remoteIpDetails.ipAddressV6 service.action.awsApiCallAction.remoteIpDetails.organization.asn service.action.awsApiCallAction.remoteIpDetails.organization.asnOrg service.action.awsApiCallAction.serviceName service.action.dnsRequestAction.domain service.action.dnsRequestAction.domainWithSuffix service.action.networkConnectionAction.blocked service.action.networkConnectionAction.connectionDirection service.action.networkConnectionAction.localPortDetails.port service.action.networkConnectionAction.protocol 
service.action.networkConnectionAction.remoteIpDetails.city.cityName service.action.networkConnectionAction.remoteIpDetails.country.countryName service.action.networkConnectionAction.remoteIpDetails.ipAddressV4 service.action.networkConnectionAction.remoteIpDetails.ipAddressV6 service.action.networkConnectionAction.remoteIpDetails.organization.asn service.action.networkConnectionAction.remoteIpDetails.organization.asnOrg service.action.networkConnectionAction.remotePortDetails.port service.action.awsApiCallAction.remoteAccountDetails.affiliated service.action.kubernetesApiCallAction.remoteIpDetails.ipAddressV4 service.action.kubernetesApiCallAction.remoteIpDetails.ipAddressV6 service.action.kubernetesApiCallAction.namespace service.action.kubernetesApiCallAction.remoteIpDetails.organization.asn service.action.kubernetesApiCallAction.requestUri service.action.kubernetesApiCallAction.statusCode service.action.networkConnectionAction.localIpDetails.ipAddressV4 service.action.networkConnectionAction.localIpDetails.ipAddressV6 service.action.networkConnectionAction.protocol service.action.awsApiCallAction.serviceName service.action.awsApiCallAction.remoteAccountDetails.accountId service.additionalInfo.threatListName service.resourceRole resource.eksClusterDetails.name resource.kubernetesDetails.kubernetesWorkloadDetails.name resource.kubernetesDetails.kubernetesWorkloadDetails.namespace resource.kubernetesDetails.kubernetesUserDetails.username resource.kubernetesDetails.kubernetesWorkloadDetails.containers.image resource.kubernetesDetails.kubernetesWorkloadDetails.containers.imagePrefix service.ebsVolumeScanDetails.scanId service.ebsVolumeScanDetails.scanDetections.threatDetectedByName.threatNames.name service.ebsVolumeScanDetails.scanDetections.threatDetectedByName.threatNames.severity service.ebsVolumeScanDetails.scanDetections.threatDetectedByName.threatNames.filePaths.hash resource.ecsClusterDetails.name resource.ecsClusterDetails.taskDetails.containers.image 
resource.ecsClusterDetails.taskDetails.definitionArn resource.containerDetails.image resource.rdsDbInstanceDetails.dbInstanceIdentifier resource.rdsDbInstanceDetails.dbClusterIdentifier resource.rdsDbInstanceDetails.engine resource.rdsDbUserDetails.user resource.rdsDbInstanceDetails.tags.key resource.rdsDbInstanceDetails.tags.value service.runtimeDetails.process.executableSha256 service.runtimeDetails.process.name service.runtimeDetails.process.name resource.lambdaDetails.functionName resource.lambdaDetails.functionArn resource.lambdaDetails.tags.key resource.lambdaDetails.tags.value public let findingCriteria: FindingCriteria? /// The name of the filter. Valid characters include period (.), underscore (_), dash (-), and alphanumeric characters. A whitespace is considered to be an invalid character. public let name: String? @@ -2558,7 +2558,7 @@ extension GuardDuty { public struct DescribeMalwareScansResponse: AWSDecodableShape { /// The pagination parameter to be used on the next list operation to retrieve more items. public let nextToken: String? - /// Contains information about malware scans. + /// Contains information about malware scans associated with GuardDuty Malware Protection for EC2. public let scans: [Scan]? @inlinable @@ -6342,7 +6342,7 @@ extension GuardDuty { public struct OrganizationAdditionalConfiguration: AWSEncodableShape { /// The status of the additional configuration that will be configured for the organization. Use one of the following values to configure the feature status for the entire organization: NEW: Indicates that when a new account joins the organization, they will have the additional configuration enabled automatically. ALL: Indicates that all accounts in the organization have the additional configuration enabled automatically. This includes NEW accounts that join the organization and accounts that may have been suspended or removed from the organization in GuardDuty. 
It may take up to 24 hours to update the configuration for all the member accounts. NONE: Indicates that the additional configuration will not be automatically enabled for any account in the organization. The administrator must manage the additional configuration for each account individually. public let autoEnable: OrgFeatureStatus? - /// The name of the additional configuration that will be configured for the organization. + /// The name of the additional configuration that will be configured for the organization. These values are applicable to only Runtime Monitoring protection plan. public let name: OrgFeatureAdditionalConfiguration? @inlinable @@ -6360,7 +6360,7 @@ extension GuardDuty { public struct OrganizationAdditionalConfigurationResult: AWSDecodableShape { /// Describes the status of the additional configuration that is configured for the member accounts within the organization. One of the following values is the status for the entire organization: NEW: Indicates that when a new account joins the organization, they will have the additional configuration enabled automatically. ALL: Indicates that all accounts in the organization have the additional configuration enabled automatically. This includes NEW accounts that join the organization and accounts that may have been suspended or removed from the organization in GuardDuty. It may take up to 24 hours to update the configuration for all the member accounts. NONE: Indicates that the additional configuration will not be automatically enabled for any account in the organization. The administrator must manage the additional configuration for each account individually. public let autoEnable: OrgFeatureStatus? - /// The name of the additional configuration that is configured for the member accounts within the organization. + /// The name of the additional configuration that is configured for the member accounts within the organization. These values are applicable to only Runtime Monitoring protection plan. 
public let name: OrgFeatureAdditionalConfiguration? @inlinable @@ -7615,7 +7615,7 @@ extension GuardDuty { public let adminDetectorId: String? /// List of volumes that were attached to the original instance to be scanned. public let attachedVolumes: [VolumeDetail]? - /// The unique ID of the detector that the request is associated with. To find the detectorId in the current Region, see the + /// The unique ID of the detector that is associated with the request. To find the detectorId in the current Region, see the /// Settings page in the GuardDuty console, or run the ListDetectors API. public let detectorId: String? /// Represents the reason for FAILED scan status. @@ -8982,7 +8982,7 @@ extension GuardDuty { } public struct UpdateOrganizationConfigurationRequest: AWSEncodableShape { - /// Represents whether or not to automatically enable member accounts in the organization. Even though this is still supported, we recommend using AutoEnableOrganizationMembers to achieve the similar results. You must provide a value for either autoEnableOrganizationMembers or autoEnable. + /// Represents whether to automatically enable member accounts in the organization. This applies to only new member accounts, not the existing member accounts. When a new account joins the organization, the chosen features will be enabled for them by default. Even though this is still supported, we recommend using AutoEnableOrganizationMembers to achieve the similar results. You must provide a value for either autoEnableOrganizationMembers or autoEnable. public let autoEnable: Bool? /// Indicates the auto-enablement configuration of GuardDuty for the member accounts in the organization. You must provide a value for either autoEnableOrganizationMembers or autoEnable. Use one of the following configuration values for autoEnableOrganizationMembers: NEW: Indicates that when a new account joins the organization, they will have GuardDuty enabled automatically. 
ALL: Indicates that all accounts in the organization have GuardDuty enabled automatically. This includes NEW accounts that join the organization and accounts that may have been suspended or removed from the organization in GuardDuty. It may take up to 24 hours to update the configuration for all the member accounts. NONE: Indicates that GuardDuty will not be automatically enabled for any account in the organization. The administrator must manage GuardDuty for each account in the organization individually. When you update the auto-enable setting from ALL or NEW to NONE, this action doesn't disable the corresponding option for your existing accounts. This configuration will apply to the new accounts that join the organization. After you update the auto-enable settings, no new account will have the corresponding option as enabled. public let autoEnableOrganizationMembers: AutoEnableMembers? diff --git a/Sources/Soto/Services/IVSRealTime/IVSRealTime_shapes.swift b/Sources/Soto/Services/IVSRealTime/IVSRealTime_shapes.swift index cbe14035d8..bbf76bea30 100644 --- a/Sources/Soto/Services/IVSRealTime/IVSRealTime_shapes.swift +++ b/Sources/Soto/Services/IVSRealTime/IVSRealTime_shapes.swift @@ -108,6 +108,7 @@ extension IVSRealTime { public enum ParticipantRecordingMediaType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case audioOnly = "AUDIO_ONLY" case audioVideo = "AUDIO_VIDEO" + case none = "NONE" public var description: String { return self.rawValue } } @@ -152,6 +153,18 @@ extension IVSRealTime { public var description: String { return self.rawValue } } + public enum ThumbnailRecordingMode: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case disabled = "DISABLED" + case interval = "INTERVAL" + public var description: String { return self.rawValue } + } + + public enum ThumbnailStorageType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case latest = "LATEST" + case 
sequential = "SEQUENTIAL" + public var description: String { return self.rawValue } + } + public enum VideoAspectRatio: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case auto = "AUTO" case portrait = "PORTRAIT" @@ -174,22 +187,27 @@ extension IVSRealTime { public let mediaTypes: [ParticipantRecordingMediaType]? /// ARN of the StorageConfiguration resource to use for individual participant recording. Default: "" (empty string, no storage configuration is specified). Individual participant recording cannot be started unless a storage configuration is specified, when a Stage is created or updated. public let storageConfigurationArn: String + /// A complex type that allows you to enable/disable the recording of thumbnails for individual participant recording and modify the interval at which thumbnails are generated for the live session. + public let thumbnailConfiguration: ParticipantThumbnailConfiguration? @inlinable - public init(mediaTypes: [ParticipantRecordingMediaType]? = nil, storageConfigurationArn: String) { + public init(mediaTypes: [ParticipantRecordingMediaType]? = nil, storageConfigurationArn: String, thumbnailConfiguration: ParticipantThumbnailConfiguration? 
= nil) { self.mediaTypes = mediaTypes self.storageConfigurationArn = storageConfigurationArn + self.thumbnailConfiguration = thumbnailConfiguration } public func validate(name: String) throws { try self.validate(self.mediaTypes, name: "mediaTypes", parent: name, max: 1) try self.validate(self.storageConfigurationArn, name: "storageConfigurationArn", parent: name, max: 128) try self.validate(self.storageConfigurationArn, name: "storageConfigurationArn", parent: name, pattern: "^^$|^arn:aws:ivs:[a-z0-9-]+:[0-9]+:storage-configuration/[a-zA-Z0-9-]+$$") + try self.thumbnailConfiguration?.validate(name: "\(name).thumbnailConfiguration") } private enum CodingKeys: String, CodingKey { case mediaTypes = "mediaTypes" case storageConfigurationArn = "storageConfigurationArn" + case thumbnailConfiguration = "thumbnailConfiguration" } } @@ -305,6 +323,34 @@ extension IVSRealTime { } } + public struct CompositionThumbnailConfiguration: AWSEncodableShape & AWSDecodableShape { + /// Indicates the format in which thumbnails are recorded. SEQUENTIAL records all generated thumbnails in a serial manner, to the media/thumbnails/(width)x(height) directory, where (width) and (height) are the width + /// and height of the thumbnail. LATEST saves the latest thumbnail in + /// media/latest_thumbnail/(width)x(height)/thumb.jpg and overwrites it at the interval specified by + /// targetIntervalSeconds. You can enable both SEQUENTIAL and LATEST. + /// Default: SEQUENTIAL. + public let storage: [ThumbnailStorageType]? + /// The targeted thumbnail-generation interval in seconds. Default: 60. + public let targetIntervalSeconds: Int? + + @inlinable + public init(storage: [ThumbnailStorageType]? = nil, targetIntervalSeconds: Int? 
= nil) { + self.storage = storage + self.targetIntervalSeconds = targetIntervalSeconds + } + + public func validate(name: String) throws { + try self.validate(self.storage, name: "storage", parent: name, max: 2) + try self.validate(self.targetIntervalSeconds, name: "targetIntervalSeconds", parent: name, max: 86400) + try self.validate(self.targetIntervalSeconds, name: "targetIntervalSeconds", parent: name, min: 1) + } + + private enum CodingKeys: String, CodingKey { + case storage = "storage" + case targetIntervalSeconds = "targetIntervalSeconds" + } + } + public struct CreateEncoderConfigurationRequest: AWSEncodableShape { /// Optional name to identify the resource. public let name: String? @@ -2071,6 +2117,37 @@ extension IVSRealTime { } } + public struct ParticipantThumbnailConfiguration: AWSEncodableShape & AWSDecodableShape { + /// Thumbnail recording mode. Default: DISABLED. + public let recordingMode: ThumbnailRecordingMode? + /// Indicates the format in which thumbnails are recorded. SEQUENTIAL records all generated thumbnails in a serial manner, to the media/thumbnails/high directory. LATEST saves the latest thumbnail + /// in media/latest_thumbnail/high/thumb.jpg and overwrites it at the interval specified by + /// targetIntervalSeconds. You can enable both SEQUENTIAL and LATEST. + /// Default: SEQUENTIAL. + public let storage: [ThumbnailStorageType]? + /// The targeted thumbnail-generation interval in seconds. This is configurable only if recordingMode is INTERVAL. Default: 60. + public let targetIntervalSeconds: Int? + + @inlinable + public init(recordingMode: ThumbnailRecordingMode? = nil, storage: [ThumbnailStorageType]? = nil, targetIntervalSeconds: Int? 
= nil) { + self.recordingMode = recordingMode + self.storage = storage + self.targetIntervalSeconds = targetIntervalSeconds + } + + public func validate(name: String) throws { + try self.validate(self.storage, name: "storage", parent: name, max: 2) + try self.validate(self.targetIntervalSeconds, name: "targetIntervalSeconds", parent: name, max: 86400) + try self.validate(self.targetIntervalSeconds, name: "targetIntervalSeconds", parent: name, min: 1) + } + + private enum CodingKeys: String, CodingKey { + case recordingMode = "recordingMode" + case storage = "storage" + case targetIntervalSeconds = "targetIntervalSeconds" + } + } + public struct ParticipantToken: AWSDecodableShape { /// Application-provided attributes to encode into the token and attach to a stage. This field is exposed to all stage participants and should not be used for personally identifying, confidential, or sensitive information. public let attributes: [String: String]? @@ -2287,12 +2364,15 @@ extension IVSRealTime { public let recordingConfiguration: RecordingConfiguration? /// ARN of the StorageConfiguration where recorded videos will be stored. public let storageConfigurationArn: String + /// A complex type that allows you to enable/disable the recording of thumbnails for a Composition and modify the interval at which thumbnails are generated for the live session. + public let thumbnailConfigurations: [CompositionThumbnailConfiguration]? @inlinable - public init(encoderConfigurationArns: [String], recordingConfiguration: RecordingConfiguration? = nil, storageConfigurationArn: String) { + public init(encoderConfigurationArns: [String], recordingConfiguration: RecordingConfiguration? = nil, storageConfigurationArn: String, thumbnailConfigurations: [CompositionThumbnailConfiguration]? 
= nil) { self.encoderConfigurationArns = encoderConfigurationArns self.recordingConfiguration = recordingConfiguration self.storageConfigurationArn = storageConfigurationArn + self.thumbnailConfigurations = thumbnailConfigurations } public func validate(name: String) throws { @@ -2306,12 +2386,17 @@ extension IVSRealTime { try self.validate(self.storageConfigurationArn, name: "storageConfigurationArn", parent: name, max: 128) try self.validate(self.storageConfigurationArn, name: "storageConfigurationArn", parent: name, min: 1) try self.validate(self.storageConfigurationArn, name: "storageConfigurationArn", parent: name, pattern: "^arn:aws:ivs:[a-z0-9-]+:[0-9]+:storage-configuration/[a-zA-Z0-9-]+$") + try self.thumbnailConfigurations?.forEach { + try $0.validate(name: "\(name).thumbnailConfigurations[]") + } + try self.validate(self.thumbnailConfigurations, name: "thumbnailConfigurations", parent: name, max: 1) } private enum CodingKeys: String, CodingKey { case encoderConfigurationArns = "encoderConfigurationArns" case recordingConfiguration = "recordingConfiguration" case storageConfigurationArn = "storageConfigurationArn" + case thumbnailConfigurations = "thumbnailConfigurations" } } diff --git a/Sources/Soto/Services/InternetMonitor/InternetMonitor_api.swift b/Sources/Soto/Services/InternetMonitor/InternetMonitor_api.swift index e1d73696fb..2e5990b513 100644 --- a/Sources/Soto/Services/InternetMonitor/InternetMonitor_api.swift +++ b/Sources/Soto/Services/InternetMonitor/InternetMonitor_api.swift @@ -129,35 +129,49 @@ public struct InternetMonitor: AWSService { /// FIPS and dualstack endpoints static var variantEndpoints: [EndpointVariantType: AWSServiceConfig.EndpointVariant] {[ + [.dualstack]: .init(endpoints: [ + "af-south-1": "internetmonitor.af-south-1.api.aws", + "ap-east-1": "internetmonitor.ap-east-1.api.aws", + "ap-northeast-1": "internetmonitor.ap-northeast-1.api.aws", + "ap-northeast-2": "internetmonitor.ap-northeast-2.api.aws", + "ap-northeast-3": 
"internetmonitor.ap-northeast-3.api.aws", + "ap-south-1": "internetmonitor.ap-south-1.api.aws", + "ap-south-2": "internetmonitor.ap-south-2.api.aws", + "ap-southeast-1": "internetmonitor.ap-southeast-1.api.aws", + "ap-southeast-2": "internetmonitor.ap-southeast-2.api.aws", + "ap-southeast-3": "internetmonitor.ap-southeast-3.api.aws", + "ap-southeast-4": "internetmonitor.ap-southeast-4.api.aws", + "ca-central-1": "internetmonitor.ca-central-1.api.aws", + "eu-central-1": "internetmonitor.eu-central-1.api.aws", + "eu-central-2": "internetmonitor.eu-central-2.api.aws", + "eu-north-1": "internetmonitor.eu-north-1.api.aws", + "eu-south-1": "internetmonitor.eu-south-1.api.aws", + "eu-south-2": "internetmonitor.eu-south-2.api.aws", + "eu-west-1": "internetmonitor.eu-west-1.api.aws", + "eu-west-2": "internetmonitor.eu-west-2.api.aws", + "eu-west-3": "internetmonitor.eu-west-3.api.aws", + "me-central-1": "internetmonitor.me-central-1.api.aws", + "me-south-1": "internetmonitor.me-south-1.api.aws", + "sa-east-1": "internetmonitor.sa-east-1.api.aws", + "us-east-1": "internetmonitor.us-east-1.api.aws", + "us-east-2": "internetmonitor.us-east-2.api.aws", + "us-west-1": "internetmonitor.us-west-1.api.aws", + "us-west-2": "internetmonitor.us-west-2.api.aws" + ]), + [.dualstack, .fips]: .init(endpoints: [ + "ca-central-1": "internetmonitor-fips.ca-central-1.api.aws", + "us-east-1": "internetmonitor-fips.us-east-1.api.aws", + "us-east-2": "internetmonitor-fips.us-east-2.api.aws", + "us-west-1": "internetmonitor-fips.us-west-1.api.aws", + "us-west-2": "internetmonitor-fips.us-west-2.api.aws" + ]), [.fips]: .init(endpoints: [ - "af-south-1": "internetmonitor-fips.af-south-1.api.aws", - "ap-east-1": "internetmonitor-fips.ap-east-1.api.aws", - "ap-northeast-1": "internetmonitor-fips.ap-northeast-1.api.aws", - "ap-northeast-2": "internetmonitor-fips.ap-northeast-2.api.aws", - "ap-northeast-3": "internetmonitor-fips.ap-northeast-3.api.aws", - "ap-south-1": 
"internetmonitor-fips.ap-south-1.api.aws", - "ap-south-2": "internetmonitor-fips.ap-south-2.api.aws", - "ap-southeast-1": "internetmonitor-fips.ap-southeast-1.api.aws", - "ap-southeast-2": "internetmonitor-fips.ap-southeast-2.api.aws", - "ap-southeast-3": "internetmonitor-fips.ap-southeast-3.api.aws", - "ap-southeast-4": "internetmonitor-fips.ap-southeast-4.api.aws", "ap-southeast-5": "internetmonitor-fips.ap-southeast-5.api.aws", "ca-central-1": "internetmonitor-fips.ca-central-1.amazonaws.com", "ca-west-1": "internetmonitor-fips.ca-west-1.api.aws", "cn-north-1": "internetmonitor-fips.cn-north-1.api.amazonwebservices.com.cn", "cn-northwest-1": "internetmonitor-fips.cn-northwest-1.api.amazonwebservices.com.cn", - "eu-central-1": "internetmonitor-fips.eu-central-1.api.aws", - "eu-central-2": "internetmonitor-fips.eu-central-2.api.aws", - "eu-north-1": "internetmonitor-fips.eu-north-1.api.aws", - "eu-south-1": "internetmonitor-fips.eu-south-1.api.aws", - "eu-south-2": "internetmonitor-fips.eu-south-2.api.aws", - "eu-west-1": "internetmonitor-fips.eu-west-1.api.aws", - "eu-west-2": "internetmonitor-fips.eu-west-2.api.aws", - "eu-west-3": "internetmonitor-fips.eu-west-3.api.aws", "il-central-1": "internetmonitor-fips.il-central-1.api.aws", - "me-central-1": "internetmonitor-fips.me-central-1.api.aws", - "me-south-1": "internetmonitor-fips.me-south-1.api.aws", - "sa-east-1": "internetmonitor-fips.sa-east-1.api.aws", "us-east-1": "internetmonitor-fips.us-east-1.amazonaws.com", "us-east-2": "internetmonitor-fips.us-east-2.amazonaws.com", "us-gov-east-1": "internetmonitor-fips.us-gov-east-1.api.aws", diff --git a/Sources/Soto/Services/Keyspaces/Keyspaces_api.swift b/Sources/Soto/Services/Keyspaces/Keyspaces_api.swift index adac3c0daf..a06613e6d1 100644 --- a/Sources/Soto/Services/Keyspaces/Keyspaces_api.swift +++ b/Sources/Soto/Services/Keyspaces/Keyspaces_api.swift @@ -709,7 +709,7 @@ public struct Keyspaces: AWSService { return try await self.untagResource(input, logger: 
logger) } - /// Adds a new Amazon Web Services Region to the keyspace. You can add a new Region to a keyspace that is either a single or a multi-Region keyspace. The new replica Region is applied to all tables in the keyspace. For more information, see Add an Amazon Web Services Region to a keyspace in Amazon Keyspaces in the Amazon Keyspaces Developer Guide. To change a single-Region to a multi-Region keyspace, you have to enable client-side timestamps for all tables in the keyspace. For more information, see Client-side timestamps in Amazon Keyspaces in the Amazon Keyspaces Developer Guide. + /// Adds a new Amazon Web Services Region to the keyspace. You can add a new Region to a keyspace that is either a single or a multi-Region keyspace. Amazon Keyspaces is going to replicate all tables in the keyspace to the new Region. To successfully replicate all tables to the new Region, they must use client-side timestamps for conflict resolution. To enable client-side timestamps, specify clientSideTimestamps.status = enabled when invoking the API. For more information about client-side timestamps, see Client-side timestamps in Amazon Keyspaces in the Amazon Keyspaces Developer Guide. To add a Region to a keyspace using the UpdateKeyspace API, the IAM principal needs permissions for the following IAM actions: cassandra:Alter cassandra:AlterMultiRegionResource cassandra:Create cassandra:CreateMultiRegionResource cassandra:Select cassandra:SelectMultiRegionResource cassandra:Modify cassandra:ModifyMultiRegionResource If the keyspace contains a table that is configured in provisioned mode with auto scaling enabled, the following additional IAM actions need to be allowed. 
application-autoscaling:RegisterScalableTarget application-autoscaling:DeregisterScalableTarget application-autoscaling:DescribeScalableTargets application-autoscaling:PutScalingPolicy application-autoscaling:DescribeScalingPolicies To use the UpdateKeyspace API, the IAM principal also needs permissions to create a service-linked role with the following elements: iam:CreateServiceLinkedRole - The action the principal can perform. arn:aws:iam::*:role/aws-service-role/replication.cassandra.amazonaws.com/AWSServiceRoleForKeyspacesReplication - The resource that the action can be performed on. iam:AWSServiceName: replication.cassandra.amazonaws.com - The only Amazon Web Services service that this role can be attached to is Amazon Keyspaces. For more information, see Configure the IAM permissions required to add an Amazon Web Services Region to a keyspace in the Amazon Keyspaces Developer Guide. @Sendable @inlinable public func updateKeyspace(_ input: UpdateKeyspaceRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> UpdateKeyspaceResponse { @@ -722,7 +722,7 @@ public struct Keyspaces: AWSService { logger: logger ) } - /// Adds a new Amazon Web Services Region to the keyspace. You can add a new Region to a keyspace that is either a single or a multi-Region keyspace. The new replica Region is applied to all tables in the keyspace. For more information, see Add an Amazon Web Services Region to a keyspace in Amazon Keyspaces in the Amazon Keyspaces Developer Guide. To change a single-Region to a multi-Region keyspace, you have to enable client-side timestamps for all tables in the keyspace. For more information, see Client-side timestamps in Amazon Keyspaces in the Amazon Keyspaces Developer Guide. + /// Adds a new Amazon Web Services Region to the keyspace. You can add a new Region to a keyspace that is either a single or a multi-Region keyspace. Amazon Keyspaces is going to replicate all tables in the keyspace to the new Region. 
To successfully replicate all tables to the new Region, they must use client-side timestamps for conflict resolution. To enable client-side timestamps, specify clientSideTimestamps.status = enabled when invoking the API. For more information about client-side timestamps, see Client-side timestamps in Amazon Keyspaces in the Amazon Keyspaces Developer Guide. To add a Region to a keyspace using the UpdateKeyspace API, the IAM principal needs permissions for the following IAM actions: cassandra:Alter cassandra:AlterMultiRegionResource cassandra:Create cassandra:CreateMultiRegionResource cassandra:Select cassandra:SelectMultiRegionResource cassandra:Modify cassandra:ModifyMultiRegionResource If the keyspace contains a table that is configured in provisioned mode with auto scaling enabled, the following additional IAM actions need to be allowed. application-autoscaling:RegisterScalableTarget application-autoscaling:DeregisterScalableTarget application-autoscaling:DescribeScalableTargets application-autoscaling:PutScalingPolicy application-autoscaling:DescribeScalingPolicies To use the UpdateKeyspace API, the IAM principal also needs permissions to create a service-linked role with the following elements: iam:CreateServiceLinkedRole - The action the principal can perform. arn:aws:iam::*:role/aws-service-role/replication.cassandra.amazonaws.com/AWSServiceRoleForKeyspacesReplication - The resource that the action can be performed on. iam:AWSServiceName: replication.cassandra.amazonaws.com - The only Amazon Web Services service that this role can be attached to is Amazon Keyspaces. For more information, see Configure the IAM permissions required to add an Amazon Web Services Region to a keyspace in the Amazon Keyspaces Developer Guide. 
/// /// Parameters: /// - clientSideTimestamps: diff --git a/Sources/Soto/Services/LakeFormation/LakeFormation_api.swift b/Sources/Soto/Services/LakeFormation/LakeFormation_api.swift index c1fdabf461..8a71c3cddc 100644 --- a/Sources/Soto/Services/LakeFormation/LakeFormation_api.swift +++ b/Sources/Soto/Services/LakeFormation/LakeFormation_api.swift @@ -80,12 +80,48 @@ public struct LakeFormation: AWSService { /// FIPS and dualstack endpoints static var variantEndpoints: [EndpointVariantType: AWSServiceConfig.EndpointVariant] {[ [.dualstack]: .init(endpoints: [ + "af-south-1": "lakeformation.af-south-1.api.aws", + "ap-east-1": "lakeformation.ap-east-1.api.aws", + "ap-northeast-1": "lakeformation.ap-northeast-1.api.aws", + "ap-northeast-2": "lakeformation.ap-northeast-2.api.aws", + "ap-northeast-3": "lakeformation.ap-northeast-3.api.aws", + "ap-south-1": "lakeformation.ap-south-1.api.aws", + "ap-south-2": "lakeformation.ap-south-2.api.aws", + "ap-southeast-1": "lakeformation.ap-southeast-1.api.aws", + "ap-southeast-2": "lakeformation.ap-southeast-2.api.aws", + "ap-southeast-3": "lakeformation.ap-southeast-3.api.aws", + "ap-southeast-4": "lakeformation.ap-southeast-4.api.aws", + "ap-southeast-5": "lakeformation.ap-southeast-5.api.aws", + "ca-central-1": "lakeformation.ca-central-1.api.aws", + "ca-west-1": "lakeformation.ca-west-1.api.aws", + "cn-north-1": "lakeformation.cn-north-1.api.amazonwebservices.com.cn", + "cn-northwest-1": "lakeformation.cn-northwest-1.api.amazonwebservices.com.cn", + "eu-central-1": "lakeformation.eu-central-1.api.aws", + "eu-central-2": "lakeformation.eu-central-2.api.aws", + "eu-north-1": "lakeformation.eu-north-1.api.aws", + "eu-south-1": "lakeformation.eu-south-1.api.aws", + "eu-south-2": "lakeformation.eu-south-2.api.aws", + "eu-west-1": "lakeformation.eu-west-1.api.aws", + "eu-west-2": "lakeformation.eu-west-2.api.aws", + "eu-west-3": "lakeformation.eu-west-3.api.aws", + "il-central-1": "lakeformation.il-central-1.api.aws", + 
"me-central-1": "lakeformation.me-central-1.api.aws", + "me-south-1": "lakeformation.me-south-1.api.aws", + "sa-east-1": "lakeformation.sa-east-1.api.aws", + "us-east-1": "lakeformation.us-east-1.api.aws", + "us-east-2": "lakeformation.us-east-2.api.aws", "us-gov-east-1": "lakeformation.us-gov-east-1.api.aws", - "us-gov-west-1": "lakeformation.us-gov-west-1.api.aws" + "us-gov-west-1": "lakeformation.us-gov-west-1.api.aws", + "us-west-1": "lakeformation.us-west-1.api.aws", + "us-west-2": "lakeformation.us-west-2.api.aws" ]), [.dualstack, .fips]: .init(endpoints: [ + "us-east-1": "lakeformation-fips.us-east-1.api.aws", + "us-east-2": "lakeformation-fips.us-east-2.api.aws", "us-gov-east-1": "lakeformation-fips.us-gov-east-1.api.aws", - "us-gov-west-1": "lakeformation-fips.us-gov-west-1.api.aws" + "us-gov-west-1": "lakeformation-fips.us-gov-west-1.api.aws", + "us-west-1": "lakeformation-fips.us-west-1.api.aws", + "us-west-2": "lakeformation-fips.us-west-2.api.aws" ]), [.fips]: .init(endpoints: [ "us-east-1": "lakeformation-fips.us-east-1.amazonaws.com", diff --git a/Sources/Soto/Services/M2/M2_api.swift b/Sources/Soto/Services/M2/M2_api.swift index e25480032a..c888c38830 100644 --- a/Sources/Soto/Services/M2/M2_api.swift +++ b/Sources/Soto/Services/M2/M2_api.swift @@ -274,6 +274,7 @@ public struct M2: AWSService { /// - instanceType: The type of instance for the runtime environment. /// - kmsKeyId: The identifier of a customer managed key. /// - name: The name of the runtime environment. Must be unique within the account. + /// - networkType: The network type required for the runtime environment. /// - preferredMaintenanceWindow: Configures the maintenance window that you want for the runtime environment. The maintenance window must have the format ddd:hh24:mi-ddd:hh24:mi and must be less than 24 hours. The following two examples are valid maintenance windows: sun:23:45-mon:00:15 or sat:01:00-sat:03:00. 
If you do not provide a value, a random system-generated value will be assigned. /// - publiclyAccessible: Specifies whether the runtime environment is publicly accessible. /// - securityGroupIds: The list of security groups for the VPC associated with this runtime environment. @@ -291,6 +292,7 @@ public struct M2: AWSService { instanceType: String, kmsKeyId: String? = nil, name: String, + networkType: NetworkType? = nil, preferredMaintenanceWindow: String? = nil, publiclyAccessible: Bool? = nil, securityGroupIds: [String]? = nil, @@ -308,6 +310,7 @@ public struct M2: AWSService { instanceType: instanceType, kmsKeyId: kmsKeyId, name: name, + networkType: networkType, preferredMaintenanceWindow: preferredMaintenanceWindow, publiclyAccessible: publiclyAccessible, securityGroupIds: securityGroupIds, diff --git a/Sources/Soto/Services/M2/M2_shapes.swift b/Sources/Soto/Services/M2/M2_shapes.swift index d22a251917..36dcd49b06 100644 --- a/Sources/Soto/Services/M2/M2_shapes.swift +++ b/Sources/Soto/Services/M2/M2_shapes.swift @@ -107,6 +107,12 @@ extension M2 { public var description: String { return self.rawValue } } + public enum NetworkType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case dual = "dual" + case ipv4 = "ipv4" + public var description: String { return self.rawValue } + } + public enum BatchJobDefinition: AWSDecodableShape, Sendable { /// Specifies a file containing a batch job definition. case fileBatchJobDefinition(FileBatchJobDefinition) @@ -854,6 +860,8 @@ extension M2 { public let kmsKeyId: String? /// The name of the runtime environment. Must be unique within the account. public let name: String + /// The network type required for the runtime environment. + public let networkType: NetworkType? /// Configures the maintenance window that you want for the runtime environment. The maintenance window must have the format ddd:hh24:mi-ddd:hh24:mi and must be less than 24 hours. 
The following two examples are valid maintenance windows: sun:23:45-mon:00:15 or sat:01:00-sat:03:00. If you do not provide a value, a random system-generated value will be assigned. public let preferredMaintenanceWindow: String? /// Specifies whether the runtime environment is publicly accessible. @@ -868,7 +876,7 @@ extension M2 { public let tags: [String: String]? @inlinable - public init(clientToken: String? = CreateEnvironmentRequest.idempotencyToken(), description: String? = nil, engineType: EngineType, engineVersion: String? = nil, highAvailabilityConfig: HighAvailabilityConfig? = nil, instanceType: String, kmsKeyId: String? = nil, name: String, preferredMaintenanceWindow: String? = nil, publiclyAccessible: Bool? = nil, securityGroupIds: [String]? = nil, storageConfigurations: [StorageConfiguration]? = nil, subnetIds: [String]? = nil, tags: [String: String]? = nil) { + public init(clientToken: String? = CreateEnvironmentRequest.idempotencyToken(), description: String? = nil, engineType: EngineType, engineVersion: String? = nil, highAvailabilityConfig: HighAvailabilityConfig? = nil, instanceType: String, kmsKeyId: String? = nil, name: String, networkType: NetworkType? = nil, preferredMaintenanceWindow: String? = nil, publiclyAccessible: Bool? = nil, securityGroupIds: [String]? = nil, storageConfigurations: [StorageConfiguration]? = nil, subnetIds: [String]? = nil, tags: [String: String]? 
= nil) { self.clientToken = clientToken self.description = description self.engineType = engineType @@ -877,6 +885,7 @@ extension M2 { self.instanceType = instanceType self.kmsKeyId = kmsKeyId self.name = name + self.networkType = networkType self.preferredMaintenanceWindow = preferredMaintenanceWindow self.publiclyAccessible = publiclyAccessible self.securityGroupIds = securityGroupIds @@ -920,6 +929,7 @@ extension M2 { case instanceType = "instanceType" case kmsKeyId = "kmsKeyId" case name = "name" + case networkType = "networkType" case preferredMaintenanceWindow = "preferredMaintenanceWindow" case publiclyAccessible = "publiclyAccessible" case securityGroupIds = "securityGroupIds" @@ -1289,11 +1299,13 @@ extension M2 { public let instanceType: String /// The name of the runtime environment. public let name: String + /// The network type supported by the runtime environment. + public let networkType: NetworkType? /// The status of the runtime environment public let status: EnvironmentLifecycle @inlinable - public init(creationTime: Date, engineType: EngineType, engineVersion: String, environmentArn: String, environmentId: String, instanceType: String, name: String, status: EnvironmentLifecycle) { + public init(creationTime: Date, engineType: EngineType, engineVersion: String, environmentArn: String, environmentId: String, instanceType: String, name: String, networkType: NetworkType? = nil, status: EnvironmentLifecycle) { self.creationTime = creationTime self.engineType = engineType self.engineVersion = engineVersion @@ -1301,6 +1313,7 @@ extension M2 { self.environmentId = environmentId self.instanceType = instanceType self.name = name + self.networkType = networkType self.status = status } @@ -1312,6 +1325,7 @@ extension M2 { case environmentId = "environmentId" case instanceType = "instanceType" case name = "name" + case networkType = "networkType" case status = "status" } } @@ -1911,6 +1925,8 @@ extension M2 { public let loadBalancerArn: String? 
/// The name of the runtime environment. Must be unique within the account. public let name: String + /// The network type supported by the runtime environment. + public let networkType: NetworkType? /// Indicates the pending maintenance scheduled on this environment. public let pendingMaintenance: PendingMaintenance? /// The maintenance window for the runtime environment. If you don't provide a value for the maintenance window, the service assigns a random value. @@ -1933,7 +1949,7 @@ extension M2 { public let vpcId: String @inlinable - public init(actualCapacity: Int? = nil, creationTime: Date, description: String? = nil, engineType: EngineType, engineVersion: String, environmentArn: String, environmentId: String, highAvailabilityConfig: HighAvailabilityConfig? = nil, instanceType: String, kmsKeyId: String? = nil, loadBalancerArn: String? = nil, name: String, pendingMaintenance: PendingMaintenance? = nil, preferredMaintenanceWindow: String? = nil, publiclyAccessible: Bool? = nil, securityGroupIds: [String], status: EnvironmentLifecycle, statusReason: String? = nil, storageConfigurations: [StorageConfiguration]? = nil, subnetIds: [String], tags: [String: String]? = nil, vpcId: String) { + public init(actualCapacity: Int? = nil, creationTime: Date, description: String? = nil, engineType: EngineType, engineVersion: String, environmentArn: String, environmentId: String, highAvailabilityConfig: HighAvailabilityConfig? = nil, instanceType: String, kmsKeyId: String? = nil, loadBalancerArn: String? = nil, name: String, networkType: NetworkType? = nil, pendingMaintenance: PendingMaintenance? = nil, preferredMaintenanceWindow: String? = nil, publiclyAccessible: Bool? = nil, securityGroupIds: [String], status: EnvironmentLifecycle, statusReason: String? = nil, storageConfigurations: [StorageConfiguration]? = nil, subnetIds: [String], tags: [String: String]? 
= nil, vpcId: String) { self.actualCapacity = actualCapacity self.creationTime = creationTime self.description = description @@ -1946,6 +1962,7 @@ extension M2 { self.kmsKeyId = kmsKeyId self.loadBalancerArn = loadBalancerArn self.name = name + self.networkType = networkType self.pendingMaintenance = pendingMaintenance self.preferredMaintenanceWindow = preferredMaintenanceWindow self.publiclyAccessible = publiclyAccessible @@ -1971,6 +1988,7 @@ extension M2 { case kmsKeyId = "kmsKeyId" case loadBalancerArn = "loadBalancerArn" case name = "name" + case networkType = "networkType" case pendingMaintenance = "pendingMaintenance" case preferredMaintenanceWindow = "preferredMaintenanceWindow" case publiclyAccessible = "publiclyAccessible" diff --git a/Sources/Soto/Services/MarketplaceAgreement/MarketplaceAgreement_api.swift b/Sources/Soto/Services/MarketplaceAgreement/MarketplaceAgreement_api.swift index 84ae6155dc..6f5ef97ea9 100644 --- a/Sources/Soto/Services/MarketplaceAgreement/MarketplaceAgreement_api.swift +++ b/Sources/Soto/Services/MarketplaceAgreement/MarketplaceAgreement_api.swift @@ -67,6 +67,7 @@ public struct MarketplaceAgreement: AWSService { serviceProtocol: .json(version: "1.0"), apiVersion: "2020-03-01", endpoint: endpoint, + variantEndpoints: Self.variantEndpoints, errorType: MarketplaceAgreementErrorType.self, middleware: middleware, timeout: timeout, @@ -78,6 +79,12 @@ public struct MarketplaceAgreement: AWSService { + /// FIPS and dualstack endpoints + static var variantEndpoints: [EndpointVariantType: AWSServiceConfig.EndpointVariant] {[ + [.fips]: .init(endpoints: [ + "us-iso-east-1": "agreement-marketplace-fips.us-iso-east-1.c2s.ic.gov" + ]) + ]} // MARK: API Calls diff --git a/Sources/Soto/Services/MediaConnect/MediaConnect_shapes.swift b/Sources/Soto/Services/MediaConnect/MediaConnect_shapes.swift index 56bbb2313f..4b36715377 100644 --- a/Sources/Soto/Services/MediaConnect/MediaConnect_shapes.swift +++ 
b/Sources/Soto/Services/MediaConnect/MediaConnect_shapes.swift @@ -317,6 +317,7 @@ extension MediaConnect { public struct AddBridgeNetworkSourceRequest: AWSEncodableShape { /// The network source multicast IP. public let multicastIp: String? + public let multicastSourceSettings: MulticastSourceSettings? /// The name of the network source. This name is used to reference the source and must be unique among sources in this bridge. public let name: String? /// The network source's gateway network name. @@ -327,8 +328,9 @@ extension MediaConnect { public let `protocol`: `Protocol`? @inlinable - public init(multicastIp: String? = nil, name: String? = nil, networkName: String? = nil, port: Int? = nil, protocol: `Protocol`? = nil) { + public init(multicastIp: String? = nil, multicastSourceSettings: MulticastSourceSettings? = nil, name: String? = nil, networkName: String? = nil, port: Int? = nil, protocol: `Protocol`? = nil) { self.multicastIp = multicastIp + self.multicastSourceSettings = multicastSourceSettings self.name = name self.networkName = networkName self.port = port @@ -337,6 +339,7 @@ extension MediaConnect { private enum CodingKeys: String, CodingKey { case multicastIp = "multicastIp" + case multicastSourceSettings = "multicastSourceSettings" case name = "name" case networkName = "networkName" case port = "port" @@ -917,6 +920,7 @@ extension MediaConnect { public struct BridgeNetworkSource: AWSDecodableShape { /// The network source multicast IP. public let multicastIp: String? + public let multicastSourceSettings: MulticastSourceSettings? /// The name of the network source. public let name: String? /// The network source's gateway network name. @@ -927,8 +931,9 @@ extension MediaConnect { public let `protocol`: `Protocol`? @inlinable - public init(multicastIp: String? = nil, name: String? = nil, networkName: String? = nil, port: Int? = nil, protocol: `Protocol`? = nil) { + public init(multicastIp: String? 
= nil, multicastSourceSettings: MulticastSourceSettings? = nil, name: String? = nil, networkName: String? = nil, port: Int? = nil, protocol: `Protocol`? = nil) { self.multicastIp = multicastIp + self.multicastSourceSettings = multicastSourceSettings self.name = name self.networkName = networkName self.port = port @@ -937,6 +942,7 @@ extension MediaConnect { private enum CodingKeys: String, CodingKey { case multicastIp = "multicastIp" + case multicastSourceSettings = "multicastSourceSettings" case name = "name" case networkName = "networkName" case port = "port" @@ -2914,6 +2920,20 @@ extension MediaConnect { } } + public struct MulticastSourceSettings: AWSEncodableShape & AWSDecodableShape { + /// The IP address of the source for source-specific multicast (SSM). + public let multicastSourceIp: String? + + @inlinable + public init(multicastSourceIp: String? = nil) { + self.multicastSourceIp = multicastSourceIp + } + + private enum CodingKeys: String, CodingKey { + case multicastSourceIp = "multicastSourceIp" + } + } + public struct Offering: AWSDecodableShape { /// The type of currency that is used for billing. The currencyCode used for all reservations is US dollars. public let currencyCode: String? @@ -3967,6 +3987,7 @@ extension MediaConnect { public struct UpdateBridgeNetworkSourceRequest: AWSEncodableShape { /// The network source multicast IP. public let multicastIp: String? + public let multicastSourceSettings: MulticastSourceSettings? /// The network source's gateway network name. public let networkName: String? /// The network source port. @@ -3975,8 +3996,9 @@ extension MediaConnect { public let `protocol`: `Protocol`? @inlinable - public init(multicastIp: String? = nil, networkName: String? = nil, port: Int? = nil, protocol: `Protocol`? = nil) { + public init(multicastIp: String? = nil, multicastSourceSettings: MulticastSourceSettings? = nil, networkName: String? = nil, port: Int? = nil, protocol: `Protocol`? 
= nil) { self.multicastIp = multicastIp + self.multicastSourceSettings = multicastSourceSettings self.networkName = networkName self.port = port self.`protocol` = `protocol` @@ -3984,6 +4006,7 @@ extension MediaConnect { private enum CodingKeys: String, CodingKey { case multicastIp = "multicastIp" + case multicastSourceSettings = "multicastSourceSettings" case networkName = "networkName" case port = "port" case `protocol` = "protocol" diff --git a/Sources/Soto/Services/MediaLive/MediaLive_api.swift b/Sources/Soto/Services/MediaLive/MediaLive_api.swift index 005139f54a..0785de2915 100644 --- a/Sources/Soto/Services/MediaLive/MediaLive_api.swift +++ b/Sources/Soto/Services/MediaLive/MediaLive_api.swift @@ -3868,19 +3868,16 @@ public struct MediaLive: AWSService { /// /// Parameters: /// - inputSecurityGroupId: The id of the Input Security Group to update. - /// - tags: A collection of key-value pairs. /// - whitelistRules: List of IPv4 CIDR addresses to whitelist /// - logger: Logger use during operation @inlinable public func updateInputSecurityGroup( inputSecurityGroupId: String, - tags: [String: String]? = nil, whitelistRules: [InputWhitelistRuleCidr]? 
= nil, logger: Logger = AWSClient.loggingDisabled ) async throws -> UpdateInputSecurityGroupResponse { let input = UpdateInputSecurityGroupRequest( inputSecurityGroupId: inputSecurityGroupId, - tags: tags, whitelistRules: whitelistRules ) return try await self.updateInputSecurityGroup(input, logger: logger) diff --git a/Sources/Soto/Services/MediaLive/MediaLive_shapes.swift b/Sources/Soto/Services/MediaLive/MediaLive_shapes.swift index e79c899a78..e7399b108b 100644 --- a/Sources/Soto/Services/MediaLive/MediaLive_shapes.swift +++ b/Sources/Soto/Services/MediaLive/MediaLive_shapes.swift @@ -424,6 +424,12 @@ extension MediaLive { public var description: String { return self.rawValue } } + public enum CmafKLVBehavior: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case noPassthrough = "NO_PASSTHROUGH" + case passthrough = "PASSTHROUGH" + public var description: String { return self.rawValue } + } + public enum CmafNielsenId3Behavior: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case noPassthrough = "NO_PASSTHROUGH" case passthrough = "PASSTHROUGH" @@ -1000,6 +1006,12 @@ extension MediaLive { public var description: String { return self.rawValue } } + public enum H265Deblocking: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case disabled = "DISABLED" + case enabled = "ENABLED" + public var description: String { return self.rawValue } + } + public enum H265FlickerAq: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case disabled = "DISABLED" case enabled = "ENABLED" @@ -4269,8 +4281,16 @@ extension MediaLive { public struct CmafIngestGroupSettings: AWSEncodableShape & AWSDecodableShape { /// A HTTP destination for the tracks public let destination: OutputLocationRef? + /// If set to passthrough, passes any KLV data from the input source to this output. + public let klvBehavior: CmafKLVBehavior? 
+ /// Change the modifier that MediaLive automatically adds to the Streams() name that identifies a KLV track. The default is "klv", which means the default name will be Streams(klv.cmfm). Any string you enter here will replace the "klv" string.\nThe modifier can only contain: numbers, letters, plus (+), minus (-), underscore (_) and period (.) and has a maximum length of 100 characters. + public let klvNameModifier: String? /// If set to passthrough, Nielsen inaudible tones for media tracking will be detected in the input audio and an equivalent ID3 tag will be inserted in the output. public let nielsenId3Behavior: CmafNielsenId3Behavior? + /// Change the modifier that MediaLive automatically adds to the Streams() name that identifies a Nielsen ID3 track. The default is "nid3", which means the default name will be Streams(nid3.cmfm). Any string you enter here will replace the "nid3" string.\nThe modifier can only contain: numbers, letters, plus (+), minus (-), underscore (_) and period (.) and has a maximum length of 100 characters. + public let nielsenId3NameModifier: String? + /// Change the modifier that MediaLive automatically adds to the Streams() name for a SCTE 35 track. The default is "scte", which means the default name will be Streams(scte.cmfm). Any string you enter here will replace the "scte" string.\nThe modifier can only contain: numbers, letters, plus (+), minus (-), underscore (_) and period (.) and has a maximum length of 100 characters. + public let scte35NameModifier: String? /// Type of scte35 track to add. none or scte35WithoutSegmentation public let scte35Type: Scte35Type? /// The nominal duration of segments. The units are specified in SegmentLengthUnits. The segments will end on the next keyframe after the specified duration, so the actual segment length might be longer, and it might be a fraction of the units. @@ -4281,9 +4301,13 @@ extension MediaLive { public let sendDelayMs: Int? @inlinable - public init(destination: OutputLocationRef? 
= nil, nielsenId3Behavior: CmafNielsenId3Behavior? = nil, scte35Type: Scte35Type? = nil, segmentLength: Int? = nil, segmentLengthUnits: CmafIngestSegmentLengthUnits? = nil, sendDelayMs: Int? = nil) { + public init(destination: OutputLocationRef? = nil, klvBehavior: CmafKLVBehavior? = nil, klvNameModifier: String? = nil, nielsenId3Behavior: CmafNielsenId3Behavior? = nil, nielsenId3NameModifier: String? = nil, scte35NameModifier: String? = nil, scte35Type: Scte35Type? = nil, segmentLength: Int? = nil, segmentLengthUnits: CmafIngestSegmentLengthUnits? = nil, sendDelayMs: Int? = nil) { self.destination = destination + self.klvBehavior = klvBehavior + self.klvNameModifier = klvNameModifier self.nielsenId3Behavior = nielsenId3Behavior + self.nielsenId3NameModifier = nielsenId3NameModifier + self.scte35NameModifier = scte35NameModifier self.scte35Type = scte35Type self.segmentLength = segmentLength self.segmentLengthUnits = segmentLengthUnits @@ -4291,6 +4315,9 @@ extension MediaLive { } public func validate(name: String) throws { + try self.validate(self.klvNameModifier, name: "klvNameModifier", parent: name, max: 100) + try self.validate(self.nielsenId3NameModifier, name: "nielsenId3NameModifier", parent: name, max: 100) + try self.validate(self.scte35NameModifier, name: "scte35NameModifier", parent: name, max: 100) try self.validate(self.segmentLength, name: "segmentLength", parent: name, min: 1) try self.validate(self.sendDelayMs, name: "sendDelayMs", parent: name, max: 2000) try self.validate(self.sendDelayMs, name: "sendDelayMs", parent: name, min: 0) @@ -4298,7 +4325,11 @@ extension MediaLive { private enum CodingKeys: String, CodingKey { case destination = "destination" + case klvBehavior = "klvBehavior" + case klvNameModifier = "klvNameModifier" case nielsenId3Behavior = "nielsenId3Behavior" + case nielsenId3NameModifier = "nielsenId3NameModifier" + case scte35NameModifier = "scte35NameModifier" case scte35Type = "scte35Type" case segmentLength = "segmentLength" 
case segmentLengthUnits = "segmentLengthUnits" @@ -8391,7 +8422,7 @@ extension MediaLive { public let audioRenditionSets: String? /// If set to passthrough, Nielsen inaudible tones for media tracking will be detected in the input audio and an equivalent ID3 tag will be inserted in the output. public let nielsenId3Behavior: Fmp4NielsenId3Behavior? - /// When set to passthrough, timed metadata is passed through from input to output. + /// Set to PASSTHROUGH to enable ID3 metadata insertion. To include metadata, you configure other parameters in the output group or individual outputs, or you add an ID3 action to the channel schedule. public let timedMetadataBehavior: Fmp4TimedMetadataBehavior? @inlinable @@ -9257,6 +9288,10 @@ extension MediaLive { public let colorMetadata: H265ColorMetadata? /// Color Space settings public let colorSpaceSettings: H265ColorSpaceSettings? + /// Enable or disable the deblocking filter for this codec. The filter reduces blocking artifacts at block boundaries, + /// which improves overall video quality. If the filter is disabled, visible block edges might appear in the output, + /// especially at lower bitrates. + public let deblocking: H265Deblocking? /// Optional. Both filters reduce bandwidth by removing imperceptible details. You can enable one of the filters. We /// recommend that you try both filters and observe the results to decide which one to use. /// The Temporal Filter reduces bandwidth by removing imperceptible details in the content. It combines perceptual @@ -9347,7 +9382,7 @@ extension MediaLive { public let treeblockSize: H265TreeblockSize? @inlinable - public init(adaptiveQuantization: H265AdaptiveQuantization? = nil, afdSignaling: AfdSignaling? = nil, alternativeTransferFunction: H265AlternativeTransferFunction? = nil, bitrate: Int? = nil, bufSize: Int? = nil, colorMetadata: H265ColorMetadata? = nil, colorSpaceSettings: H265ColorSpaceSettings? = nil, filterSettings: H265FilterSettings? = nil, fixedAfd: FixedAfd? 
= nil, flickerAq: H265FlickerAq? = nil, framerateDenominator: Int? = nil, framerateNumerator: Int? = nil, gopClosedCadence: Int? = nil, gopSize: Double? = nil, gopSizeUnits: H265GopSizeUnits? = nil, level: H265Level? = nil, lookAheadRateControl: H265LookAheadRateControl? = nil, maxBitrate: Int? = nil, minIInterval: Int? = nil, minQp: Int? = nil, mvOverPictureBoundaries: H265MvOverPictureBoundaries? = nil, mvTemporalPredictor: H265MvTemporalPredictor? = nil, parDenominator: Int? = nil, parNumerator: Int? = nil, profile: H265Profile? = nil, qvbrQualityLevel: Int? = nil, rateControlMode: H265RateControlMode? = nil, scanType: H265ScanType? = nil, sceneChangeDetect: H265SceneChangeDetect? = nil, slices: Int? = nil, tier: H265Tier? = nil, tileHeight: Int? = nil, tilePadding: H265TilePadding? = nil, tileWidth: Int? = nil, timecodeBurninSettings: TimecodeBurninSettings? = nil, timecodeInsertion: H265TimecodeInsertionBehavior? = nil, treeblockSize: H265TreeblockSize? = nil) { + public init(adaptiveQuantization: H265AdaptiveQuantization? = nil, afdSignaling: AfdSignaling? = nil, alternativeTransferFunction: H265AlternativeTransferFunction? = nil, bitrate: Int? = nil, bufSize: Int? = nil, colorMetadata: H265ColorMetadata? = nil, colorSpaceSettings: H265ColorSpaceSettings? = nil, deblocking: H265Deblocking? = nil, filterSettings: H265FilterSettings? = nil, fixedAfd: FixedAfd? = nil, flickerAq: H265FlickerAq? = nil, framerateDenominator: Int? = nil, framerateNumerator: Int? = nil, gopClosedCadence: Int? = nil, gopSize: Double? = nil, gopSizeUnits: H265GopSizeUnits? = nil, level: H265Level? = nil, lookAheadRateControl: H265LookAheadRateControl? = nil, maxBitrate: Int? = nil, minIInterval: Int? = nil, minQp: Int? = nil, mvOverPictureBoundaries: H265MvOverPictureBoundaries? = nil, mvTemporalPredictor: H265MvTemporalPredictor? = nil, parDenominator: Int? = nil, parNumerator: Int? = nil, profile: H265Profile? = nil, qvbrQualityLevel: Int? = nil, rateControlMode: H265RateControlMode? 
= nil, scanType: H265ScanType? = nil, sceneChangeDetect: H265SceneChangeDetect? = nil, slices: Int? = nil, tier: H265Tier? = nil, tileHeight: Int? = nil, tilePadding: H265TilePadding? = nil, tileWidth: Int? = nil, timecodeBurninSettings: TimecodeBurninSettings? = nil, timecodeInsertion: H265TimecodeInsertionBehavior? = nil, treeblockSize: H265TreeblockSize? = nil) { self.adaptiveQuantization = adaptiveQuantization self.afdSignaling = afdSignaling self.alternativeTransferFunction = alternativeTransferFunction @@ -9355,6 +9390,7 @@ extension MediaLive { self.bufSize = bufSize self.colorMetadata = colorMetadata self.colorSpaceSettings = colorSpaceSettings + self.deblocking = deblocking self.filterSettings = filterSettings self.fixedAfd = fixedAfd self.flickerAq = flickerAq @@ -9424,6 +9460,7 @@ extension MediaLive { case bufSize = "bufSize" case colorMetadata = "colorMetadata" case colorSpaceSettings = "colorSpaceSettings" + case deblocking = "deblocking" case filterSettings = "filterSettings" case fixedAfd = "fixedAfd" case flickerAq = "flickerAq" @@ -9823,9 +9860,9 @@ extension MediaLive { } public struct HlsId3SegmentTaggingScheduleActionSettings: AWSEncodableShape & AWSDecodableShape { - /// Base64 string formatted according to the ID3 specification: http://id3.org/id3v2.4.0-structure + /// Complete this parameter if you want to specify the entire ID3 metadata. Enter a base64 string that contains one or more fully formed ID3 tags, according to the ID3 specification: http://id3.org/id3v2.4.0-structure public let id3: String? - /// ID3 tag to insert into each segment. Supports special keyword identifiers to substitute in segment-related values.\nSupported keyword identifiers: https://docs.aws.amazon.com/medialive/latest/ug/variable-data-identifiers.html + /// Complete this parameter if you want to specify only the metadata, not the entire frame. MediaLive will insert the metadata in a TXXX frame. Enter the value as plain text. 
You can include standard MediaLive variable data such as the current segment number. public let tag: String? @inlinable @@ -9990,7 +10027,7 @@ extension MediaLive { } public struct HlsTimedMetadataScheduleActionSettings: AWSEncodableShape & AWSDecodableShape { - /// Base64 string formatted according to the ID3 specification: http://id3.org/id3v2.4.0-structure + /// Enter a base64 string that contains one or more fully formed ID3 tags.See the ID3 specification: http://id3.org/id3v2.4.0-structure public let id3: String? @inlinable @@ -12354,7 +12391,7 @@ extension MediaLive { public let scte35Behavior: M3u8Scte35Behavior? /// Packet Identifier (PID) of the SCTE-35 stream in the transport stream. Can be entered as a decimal or hexadecimal value. public let scte35Pid: String? - /// When set to passthrough, timed metadata is passed through from input to output. + /// Set to PASSTHROUGH to enable ID3 metadata insertion. To include metadata, you configure other parameters in the output group or individual outputs, or you add an ID3 action to the channel schedule. public let timedMetadataBehavior: M3u8TimedMetadataBehavior? /// Packet Identifier (PID) of the timed metadata stream in the transport stream. Can be entered as a decimal or hexadecimal value. Valid values are 32 (or 0x20)..8182 (or 0x1ff6). public let timedMetadataPid: String? @@ -12540,20 +12577,30 @@ extension MediaLive { } public struct MediaPackageOutputDestinationSettings: AWSEncodableShape & AWSDecodableShape { + /// Name of the channel group in MediaPackageV2. Only use if you are sending CMAF Ingest output to a CMAF ingest endpoint on a MediaPackage channel that uses MediaPackage v2. + public let channelGroup: String? /// ID of the channel in MediaPackage that is the destination for this output group. You do not need to specify the individual inputs in MediaPackage; MediaLive will handle the connection of the two MediaLive pipelines to the two MediaPackage inputs. 
The MediaPackage channel and MediaLive channel must be in the same region. public let channelId: String? + /// Name of the channel in MediaPackageV2. Only use if you are sending CMAF Ingest output to a CMAF ingest endpoint on a MediaPackage channel that uses MediaPackage v2. + public let channelName: String? @inlinable - public init(channelId: String? = nil) { + public init(channelGroup: String? = nil, channelId: String? = nil, channelName: String? = nil) { + self.channelGroup = channelGroup self.channelId = channelId + self.channelName = channelName } public func validate(name: String) throws { + try self.validate(self.channelGroup, name: "channelGroup", parent: name, min: 1) try self.validate(self.channelId, name: "channelId", parent: name, min: 1) + try self.validate(self.channelName, name: "channelName", parent: name, min: 1) } private enum CodingKeys: String, CodingKey { + case channelGroup = "channelGroup" case channelId = "channelId" + case channelName = "channelName" } } @@ -14743,9 +14790,9 @@ extension MediaLive { } public struct ScheduleActionSettings: AWSEncodableShape & AWSDecodableShape { - /// Action to insert HLS ID3 segment tagging + /// Action to insert ID3 metadata in every segment, in HLS output groups public let hlsId3SegmentTaggingSettings: HlsId3SegmentTaggingScheduleActionSettings? - /// Action to insert HLS metadata + /// Action to insert ID3 metadata once, in HLS output groups public let hlsTimedMetadataSettings: HlsTimedMetadataScheduleActionSettings? /// Action to prepare an input for a future immediate input switch public let inputPrepareSettings: InputPrepareScheduleActionSettings? @@ -17622,6 +17669,14 @@ extension MediaLive { /// List of IPv4 CIDR addresses to whitelist public let whitelistRules: [InputWhitelistRuleCidr]? + @inlinable + public init(inputSecurityGroupId: String, whitelistRules: [InputWhitelistRuleCidr]? 
= nil) { + self.inputSecurityGroupId = inputSecurityGroupId + self.tags = nil + self.whitelistRules = whitelistRules + } + + @available(*, deprecated, message: "Members tags have been deprecated") @inlinable public init(inputSecurityGroupId: String, tags: [String: String]? = nil, whitelistRules: [InputWhitelistRuleCidr]? = nil) { self.inputSecurityGroupId = inputSecurityGroupId diff --git a/Sources/Soto/Services/MigrationHub/MigrationHub_api.swift b/Sources/Soto/Services/MigrationHub/MigrationHub_api.swift index 96dcf7c15c..c4b5e2088b 100644 --- a/Sources/Soto/Services/MigrationHub/MigrationHub_api.swift +++ b/Sources/Soto/Services/MigrationHub/MigrationHub_api.swift @@ -156,6 +156,44 @@ public struct MigrationHub: AWSService { return try await self.associateDiscoveredResource(input, logger: logger) } + /// Associates a source resource with a migration task. For example, the source resource can be a source server, an application, or a migration wave. + @Sendable + @inlinable + public func associateSourceResource(_ input: AssociateSourceResourceRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> AssociateSourceResourceResult { + try await self.client.execute( + operation: "AssociateSourceResource", + path: "/", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Associates a source resource with a migration task. For example, the source resource can be a source server, an application, or a migration wave. + /// + /// Parameters: + /// - dryRun: This is an optional parameter that you can use to test whether the call will succeed. Set this parameter to true to verify that you have the permissions that are required to make the call, and that you have specified the other parameters in the call correctly. + /// - migrationTaskName: A unique identifier that references the migration task. Do not include sensitive data in this field. 
+ /// - progressUpdateStream: The name of the progress-update stream, which is used for access control as well as a namespace for migration-task names that is implicitly linked to your AWS account. The progress-update stream must uniquely identify the migration tool as it is used for all updates made by the tool; however, it does not need to be unique for each AWS account because it is scoped to the AWS account. + /// - sourceResource: The source resource that you want to associate. + /// - logger: Logger use during operation + @inlinable + public func associateSourceResource( + dryRun: Bool? = nil, + migrationTaskName: String, + progressUpdateStream: String, + sourceResource: SourceResource, + logger: Logger = AWSClient.loggingDisabled + ) async throws -> AssociateSourceResourceResult { + let input = AssociateSourceResourceRequest( + dryRun: dryRun, + migrationTaskName: migrationTaskName, + progressUpdateStream: progressUpdateStream, + sourceResource: sourceResource + ) + return try await self.associateSourceResource(input, logger: logger) + } + /// Creates a progress update stream which is an AWS resource used for access control as well as a namespace for migration task names that is implicitly linked to your AWS account. It must uniquely identify the migration tool as it is used for all updates made by the tool; however, it does not need to be unique for each AWS account because it is scoped to the AWS account. @Sendable @inlinable @@ -357,6 +395,44 @@ public struct MigrationHub: AWSService { return try await self.disassociateDiscoveredResource(input, logger: logger) } + /// Removes the association between a source resource and a migration task. 
+ @Sendable + @inlinable + public func disassociateSourceResource(_ input: DisassociateSourceResourceRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DisassociateSourceResourceResult { + try await self.client.execute( + operation: "DisassociateSourceResource", + path: "/", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Removes the association between a source resource and a migration task. + /// + /// Parameters: + /// - dryRun: This is an optional parameter that you can use to test whether the call will succeed. Set this parameter to true to verify that you have the permissions that are required to make the call, and that you have specified the other parameters in the call correctly. + /// - migrationTaskName: A unique identifier that references the migration task. Do not include sensitive data in this field. + /// - progressUpdateStream: The name of the progress-update stream, which is used for access control as well as a namespace for migration-task names that is implicitly linked to your AWS account. The progress-update stream must uniquely identify the migration tool as it is used for all updates made by the tool; however, it does not need to be unique for each AWS account because it is scoped to the AWS account. + /// - sourceResourceName: The name that was specified for the source resource. + /// - logger: Logger use during operation + @inlinable + public func disassociateSourceResource( + dryRun: Bool? 
= nil, + migrationTaskName: String, + progressUpdateStream: String, + sourceResourceName: String, + logger: Logger = AWSClient.loggingDisabled + ) async throws -> DisassociateSourceResourceResult { + let input = DisassociateSourceResourceRequest( + dryRun: dryRun, + migrationTaskName: migrationTaskName, + progressUpdateStream: progressUpdateStream, + sourceResourceName: sourceResourceName + ) + return try await self.disassociateSourceResource(input, logger: logger) + } + /// Registers a new migration task which represents a server, database, etc., being migrated to AWS by a migration tool. This API is a prerequisite to calling the NotifyMigrationTaskState API as the migration tool must first register the migration task with Migration Hub. @Sendable @inlinable @@ -503,6 +579,44 @@ public struct MigrationHub: AWSService { return try await self.listDiscoveredResources(input, logger: logger) } + /// This is a paginated API that returns all the migration-task states for the specified MigrationTaskName and ProgressUpdateStream. + @Sendable + @inlinable + public func listMigrationTaskUpdates(_ input: ListMigrationTaskUpdatesRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListMigrationTaskUpdatesResult { + try await self.client.execute( + operation: "ListMigrationTaskUpdates", + path: "/", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// This is a paginated API that returns all the migration-task states for the specified MigrationTaskName and ProgressUpdateStream. + /// + /// Parameters: + /// - maxResults: The maximum number of results to include in the response. If more results exist than the value that you specify here for MaxResults, the response will include a token that you can use to retrieve the next set of results. + /// - migrationTaskName: A unique identifier that references the migration task. Do not include sensitive data in this field. 
+ /// - nextToken: If NextToken was returned by a previous call, there are more results available. The value of NextToken is a unique pagination token for each page. To retrieve the next page of results, specify the NextToken value that the previous call returned. Keep all other arguments unchanged. Each pagination token expires after 24 hours. Using an expired pagination token will return an HTTP 400 InvalidToken error. + /// - progressUpdateStream: The name of the progress-update stream, which is used for access control as well as a namespace for migration-task names that is implicitly linked to your AWS account. The progress-update stream must uniquely identify the migration tool as it is used for all updates made by the tool; however, it does not need to be unique for each AWS account because it is scoped to the AWS account. + /// - logger: Logger use during operation + @inlinable + public func listMigrationTaskUpdates( + maxResults: Int? = nil, + migrationTaskName: String, + nextToken: String? = nil, + progressUpdateStream: String, + logger: Logger = AWSClient.loggingDisabled + ) async throws -> ListMigrationTaskUpdatesResult { + let input = ListMigrationTaskUpdatesRequest( + maxResults: maxResults, + migrationTaskName: migrationTaskName, + nextToken: nextToken, + progressUpdateStream: progressUpdateStream + ) + return try await self.listMigrationTaskUpdates(input, logger: logger) + } + /// Lists all, or filtered by resource name, migration tasks associated with the user account making this call. This API has the following traits: Can show a summary list of the most recent migration tasks. Can show a summary list of migration tasks associated with a given discovered resource. Lists migration tasks in a paginated interface. 
@Sendable @inlinable @@ -570,6 +684,44 @@ public struct MigrationHub: AWSService { return try await self.listProgressUpdateStreams(input, logger: logger) } + /// Lists all the source resource that are associated with the specified MigrationTaskName and ProgressUpdateStream. + @Sendable + @inlinable + public func listSourceResources(_ input: ListSourceResourcesRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListSourceResourcesResult { + try await self.client.execute( + operation: "ListSourceResources", + path: "/", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Lists all the source resource that are associated with the specified MigrationTaskName and ProgressUpdateStream. + /// + /// Parameters: + /// - maxResults: The maximum number of results to include in the response. If more results exist than the value that you specify here for MaxResults, the response will include a token that you can use to retrieve the next set of results. + /// - migrationTaskName: A unique identifier that references the migration task. Do not store confidential data in this field. + /// - nextToken: If NextToken was returned by a previous call, there are more results available. The value of NextToken is a unique pagination token for each page. To retrieve the next page of results, specify the NextToken value that the previous call returned. Keep all other arguments unchanged. Each pagination token expires after 24 hours. Using an expired pagination token will return an HTTP 400 InvalidToken error. + /// - progressUpdateStream: The name of the progress-update stream, which is used for access control as well as a namespace for migration-task names that is implicitly linked to your AWS account. The progress-update stream must uniquely identify the migration tool as it is used for all updates made by the tool; however, it does not need to be unique for each AWS account because it is scoped to the AWS account. 
+ /// - logger: Logger use during operation + @inlinable + public func listSourceResources( + maxResults: Int? = nil, + migrationTaskName: String, + nextToken: String? = nil, + progressUpdateStream: String, + logger: Logger = AWSClient.loggingDisabled + ) async throws -> ListSourceResourcesResult { + let input = ListSourceResourcesRequest( + maxResults: maxResults, + migrationTaskName: migrationTaskName, + nextToken: nextToken, + progressUpdateStream: progressUpdateStream + ) + return try await self.listSourceResources(input, logger: logger) + } + /// Sets the migration state of an application. For a given application identified by the value passed to ApplicationId, its status is set or updated by passing one of three values to Status: NOT_STARTED | IN_PROGRESS | COMPLETED. @Sendable @inlinable @@ -652,8 +804,7 @@ public struct MigrationHub: AWSService { return try await self.notifyMigrationTaskState(input, logger: logger) } - /// Provides identifying details of the resource being migrated so that it can be associated in the Application Discovery Service repository. This association occurs asynchronously after PutResourceAttributes returns. Keep in mind that subsequent calls to PutResourceAttributes will override previously stored attributes. For example, if it is first called with a MAC address, but later, it is desired to add an IP address, it will then be required to call it with both the IP and MAC addresses to prevent overriding the MAC address. Note the instructions regarding the special use case of the ResourceAttributeList parameter when specifying any "VM" related value. - /// Because this is an asynchronous call, it will always return 200, whether an association occurs or not. To confirm if an association was found based on the provided details, call ListDiscoveredResources. + /// Provides identifying details of the resource being migrated so that it can be associated in the Application Discovery Service repository. 
This association occurs asynchronously after PutResourceAttributes returns. Keep in mind that subsequent calls to PutResourceAttributes will override previously stored attributes. For example, if it is first called with a MAC address, but later, it is desired to add an IP address, it will then be required to call it with both the IP and MAC addresses to prevent overriding the MAC address. Note the instructions regarding the special use case of the ResourceAttributeList parameter when specifying any "VM" related value. Because this is an asynchronous call, it will always return 200, whether an association occurs or not. To confirm if an association was found based on the provided details, call ListDiscoveredResources. @Sendable @inlinable public func putResourceAttributes(_ input: PutResourceAttributesRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> PutResourceAttributesResult { @@ -666,14 +817,13 @@ public struct MigrationHub: AWSService { logger: logger ) } - /// Provides identifying details of the resource being migrated so that it can be associated in the Application Discovery Service repository. This association occurs asynchronously after PutResourceAttributes returns. Keep in mind that subsequent calls to PutResourceAttributes will override previously stored attributes. For example, if it is first called with a MAC address, but later, it is desired to add an IP address, it will then be required to call it with both the IP and MAC addresses to prevent overriding the MAC address. Note the instructions regarding the special use case of the ResourceAttributeList parameter when specifying any "VM" related value. - /// Because this is an asynchronous call, it will always return 200, whether an association occurs or not. To confirm if an association was found based on the provided details, call ListDiscoveredResources. 
+ /// Provides identifying details of the resource being migrated so that it can be associated in the Application Discovery Service repository. This association occurs asynchronously after PutResourceAttributes returns. Keep in mind that subsequent calls to PutResourceAttributes will override previously stored attributes. For example, if it is first called with a MAC address, but later, it is desired to add an IP address, it will then be required to call it with both the IP and MAC addresses to prevent overriding the MAC address. Note the instructions regarding the special use case of the ResourceAttributeList parameter when specifying any "VM" related value. Because this is an asynchronous call, it will always return 200, whether an association occurs or not. To confirm if an association was found based on the provided details, call ListDiscoveredResources. /// /// Parameters: /// - dryRun: Optional boolean flag to indicate whether any effect should take place. Used to test if the caller has permission to make the call. /// - migrationTaskName: Unique identifier that references the migration task. Do not store personal data in this field. /// - progressUpdateStream: The name of the ProgressUpdateStream. - /// - resourceAttributeList: Information about the resource that is being migrated. This data will be used to map the task to a resource in the Application Discovery Service repository. Takes the object array of ResourceAttribute where the Type field is reserved for the following values: IPV4_ADDRESS | IPV6_ADDRESS | MAC_ADDRESS | FQDN | VM_MANAGER_ID | VM_MANAGED_OBJECT_REFERENCE | VM_NAME | VM_PATH | BIOS_ID | MOTHERBOARD_SERIAL_NUMBER where the identifying value can be a string up to 256 characters. + /// - resourceAttributeList: Information about the resource that is being migrated. This data will be used to map the task to a resource in the Application Discovery Service repository. 
Takes the object array of ResourceAttribute where the Type field is reserved for the following values: IPV4_ADDRESS | IPV6_ADDRESS | MAC_ADDRESS | FQDN | VM_MANAGER_ID | VM_MANAGED_OBJECT_REFERENCE | VM_NAME | VM_PATH | BIOS_ID | MOTHERBOARD_SERIAL_NUMBER where the identifying value can be a string up to 256 characters. If any "VM" related value is set for a ResourceAttribute object, it is required that VM_MANAGER_ID, as a minimum, is always set. If VM_MANAGER_ID is not set, then all "VM" fields will be discarded and "VM" fields will not be used for matching the migration task to a server in Application Discovery Service repository. See the Example section below for a use case of specifying "VM" related values. If a server you are trying to match has multiple IP or MAC addresses, you should provide as many as you know in separate type/value pairs passed to the ResourceAttributeList parameter to maximize the chances of matching. /// - logger: Logger use during operation @inlinable public func putResourceAttributes( @@ -823,6 +973,46 @@ extension MigrationHub { return self.listDiscoveredResourcesPaginator(input, logger: logger) } + /// Return PaginatorSequence for operation ``listMigrationTaskUpdates(_:logger:)``. + /// + /// - Parameters: + /// - input: Input for operation + /// - logger: Logger used for logging + @inlinable + public func listMigrationTaskUpdatesPaginator( + _ input: ListMigrationTaskUpdatesRequest, + logger: Logger = AWSClient.loggingDisabled + ) -> AWSClient.PaginatorSequence { + return .init( + input: input, + command: self.listMigrationTaskUpdates, + inputKey: \ListMigrationTaskUpdatesRequest.nextToken, + outputKey: \ListMigrationTaskUpdatesResult.nextToken, + logger: logger + ) + } + /// Return PaginatorSequence for operation ``listMigrationTaskUpdates(_:logger:)``. + /// + /// - Parameters: + /// - maxResults: The maximum number of results to include in the response. 
If more results exist than the value that you specify here for MaxResults, the response will include a token that you can use to retrieve the next set of results. + /// - migrationTaskName: A unique identifier that references the migration task. Do not include sensitive data in this field. + /// - progressUpdateStream: The name of the progress-update stream, which is used for access control as well as a namespace for migration-task names that is implicitly linked to your AWS account. The progress-update stream must uniquely identify the migration tool as it is used for all updates made by the tool; however, it does not need to be unique for each AWS account because it is scoped to the AWS account. + /// - logger: Logger used for logging + @inlinable + public func listMigrationTaskUpdatesPaginator( + maxResults: Int? = nil, + migrationTaskName: String, + progressUpdateStream: String, + logger: Logger = AWSClient.loggingDisabled + ) -> AWSClient.PaginatorSequence { + let input = ListMigrationTaskUpdatesRequest( + maxResults: maxResults, + migrationTaskName: migrationTaskName, + progressUpdateStream: progressUpdateStream + ) + return self.listMigrationTaskUpdatesPaginator(input, logger: logger) + } + /// Return PaginatorSequence for operation ``listMigrationTasks(_:logger:)``. /// /// - Parameters: @@ -893,6 +1083,46 @@ extension MigrationHub { ) return self.listProgressUpdateStreamsPaginator(input, logger: logger) } + + /// Return PaginatorSequence for operation ``listSourceResources(_:logger:)``. 
+ /// + /// - Parameters: + /// - input: Input for operation + /// - logger: Logger used for logging + @inlinable + public func listSourceResourcesPaginator( + _ input: ListSourceResourcesRequest, + logger: Logger = AWSClient.loggingDisabled + ) -> AWSClient.PaginatorSequence { + return .init( + input: input, + command: self.listSourceResources, + inputKey: \ListSourceResourcesRequest.nextToken, + outputKey: \ListSourceResourcesResult.nextToken, + logger: logger + ) + } + /// Return PaginatorSequence for operation ``listSourceResources(_:logger:)``. + /// + /// - Parameters: + /// - maxResults: The maximum number of results to include in the response. If more results exist than the value that you specify here for MaxResults, the response will include a token that you can use to retrieve the next set of results. + /// - migrationTaskName: A unique identifier that references the migration task. Do not store confidential data in this field. + /// - progressUpdateStream: The name of the progress-update stream, which is used for access control as well as a namespace for migration-task names that is implicitly linked to your AWS account. The progress-update stream must uniquely identify the migration tool as it is used for all updates made by the tool; however, it does not need to be unique for each AWS account because it is scoped to the AWS account. + /// - logger: Logger used for logging + @inlinable + public func listSourceResourcesPaginator( + maxResults: Int? 
= nil, + migrationTaskName: String, + progressUpdateStream: String, + logger: Logger = AWSClient.loggingDisabled + ) -> AWSClient.PaginatorSequence { + let input = ListSourceResourcesRequest( + maxResults: maxResults, + migrationTaskName: migrationTaskName, + progressUpdateStream: progressUpdateStream + ) + return self.listSourceResourcesPaginator(input, logger: logger) + } } extension MigrationHub.ListApplicationStatesRequest: AWSPaginateToken { @@ -930,6 +1160,18 @@ extension MigrationHub.ListDiscoveredResourcesRequest: AWSPaginateToken { } } +extension MigrationHub.ListMigrationTaskUpdatesRequest: AWSPaginateToken { + @inlinable + public func usingPaginationToken(_ token: String) -> MigrationHub.ListMigrationTaskUpdatesRequest { + return .init( + maxResults: self.maxResults, + migrationTaskName: self.migrationTaskName, + nextToken: token, + progressUpdateStream: self.progressUpdateStream + ) + } +} + extension MigrationHub.ListMigrationTasksRequest: AWSPaginateToken { @inlinable public func usingPaginationToken(_ token: String) -> MigrationHub.ListMigrationTasksRequest { @@ -950,3 +1192,15 @@ extension MigrationHub.ListProgressUpdateStreamsRequest: AWSPaginateToken { ) } } + +extension MigrationHub.ListSourceResourcesRequest: AWSPaginateToken { + @inlinable + public func usingPaginationToken(_ token: String) -> MigrationHub.ListSourceResourcesRequest { + return .init( + maxResults: self.maxResults, + migrationTaskName: self.migrationTaskName, + nextToken: token, + progressUpdateStream: self.progressUpdateStream + ) + } +} diff --git a/Sources/Soto/Services/MigrationHub/MigrationHub_shapes.swift b/Sources/Soto/Services/MigrationHub/MigrationHub_shapes.swift index aea37ac6d2..f3b3266403 100644 --- a/Sources/Soto/Services/MigrationHub/MigrationHub_shapes.swift +++ b/Sources/Soto/Services/MigrationHub/MigrationHub_shapes.swift @@ -55,6 +55,11 @@ extension MigrationHub { public var description: String { return self.rawValue } } + public enum UpdateType: String, 
CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case migrationTaskStateUpdated = "MIGRATION_TASK_STATE_UPDATED" + public var description: String { return self.rawValue } + } + // MARK: Shapes public struct ApplicationState: AWSDecodableShape { @@ -159,6 +164,46 @@ extension MigrationHub { public init() {} } + public struct AssociateSourceResourceRequest: AWSEncodableShape { + /// This is an optional parameter that you can use to test whether the call will succeed. Set this parameter to true to verify that you have the permissions that are required to make the call, and that you have specified the other parameters in the call correctly. + public let dryRun: Bool? + /// A unique identifier that references the migration task. Do not include sensitive data in this field. + public let migrationTaskName: String + /// The name of the progress-update stream, which is used for access control as well as a namespace for migration-task names that is implicitly linked to your AWS account. The progress-update stream must uniquely identify the migration tool as it is used for all updates made by the tool; however, it does not need to be unique for each AWS account because it is scoped to the AWS account. + public let progressUpdateStream: String + /// The source resource that you want to associate. + public let sourceResource: SourceResource + + @inlinable + public init(dryRun: Bool? 
= nil, migrationTaskName: String, progressUpdateStream: String, sourceResource: SourceResource) { + self.dryRun = dryRun + self.migrationTaskName = migrationTaskName + self.progressUpdateStream = progressUpdateStream + self.sourceResource = sourceResource + } + + public func validate(name: String) throws { + try self.validate(self.migrationTaskName, name: "migrationTaskName", parent: name, max: 256) + try self.validate(self.migrationTaskName, name: "migrationTaskName", parent: name, min: 1) + try self.validate(self.migrationTaskName, name: "migrationTaskName", parent: name, pattern: "^[^:|]+$") + try self.validate(self.progressUpdateStream, name: "progressUpdateStream", parent: name, max: 50) + try self.validate(self.progressUpdateStream, name: "progressUpdateStream", parent: name, min: 1) + try self.validate(self.progressUpdateStream, name: "progressUpdateStream", parent: name, pattern: "^[^/:|\\000-\\037]+$") + try self.sourceResource.validate(name: "\(name).sourceResource") + } + + private enum CodingKeys: String, CodingKey { + case dryRun = "DryRun" + case migrationTaskName = "MigrationTaskName" + case progressUpdateStream = "ProgressUpdateStream" + case sourceResource = "SourceResource" + } + } + + public struct AssociateSourceResourceResult: AWSDecodableShape { + public init() {} + } + public struct CreateProgressUpdateStreamRequest: AWSEncodableShape { /// Optional boolean flag to indicate whether any effect should take place. Used to test if the caller has permission to make the call. public let dryRun: Bool? @@ -404,6 +449,47 @@ extension MigrationHub { public init() {} } + public struct DisassociateSourceResourceRequest: AWSEncodableShape { + /// This is an optional parameter that you can use to test whether the call will succeed. Set this parameter to true to verify that you have the permissions that are required to make the call, and that you have specified the other parameters in the call correctly. + public let dryRun: Bool? 
+ /// A unique identifier that references the migration task. Do not include sensitive data in this field. + public let migrationTaskName: String + /// The name of the progress-update stream, which is used for access control as well as a namespace for migration-task names that is implicitly linked to your AWS account. The progress-update stream must uniquely identify the migration tool as it is used for all updates made by the tool; however, it does not need to be unique for each AWS account because it is scoped to the AWS account. + public let progressUpdateStream: String + /// The name that was specified for the source resource. + public let sourceResourceName: String + + @inlinable + public init(dryRun: Bool? = nil, migrationTaskName: String, progressUpdateStream: String, sourceResourceName: String) { + self.dryRun = dryRun + self.migrationTaskName = migrationTaskName + self.progressUpdateStream = progressUpdateStream + self.sourceResourceName = sourceResourceName + } + + public func validate(name: String) throws { + try self.validate(self.migrationTaskName, name: "migrationTaskName", parent: name, max: 256) + try self.validate(self.migrationTaskName, name: "migrationTaskName", parent: name, min: 1) + try self.validate(self.migrationTaskName, name: "migrationTaskName", parent: name, pattern: "^[^:|]+$") + try self.validate(self.progressUpdateStream, name: "progressUpdateStream", parent: name, max: 50) + try self.validate(self.progressUpdateStream, name: "progressUpdateStream", parent: name, min: 1) + try self.validate(self.progressUpdateStream, name: "progressUpdateStream", parent: name, pattern: "^[^/:|\\000-\\037]+$") + try self.validate(self.sourceResourceName, name: "sourceResourceName", parent: name, max: 1600) + try self.validate(self.sourceResourceName, name: "sourceResourceName", parent: name, min: 1) + } + + private enum CodingKeys: String, CodingKey { + case dryRun = "DryRun" + case migrationTaskName = "MigrationTaskName" + case progressUpdateStream = 
"ProgressUpdateStream" + case sourceResourceName = "SourceResourceName" + } + } + + public struct DisassociateSourceResourceResult: AWSDecodableShape { + public init() {} + } + public struct DiscoveredResource: AWSEncodableShape & AWSDecodableShape { /// The configurationId in Application Discovery Service that uniquely identifies the on-premise resource. public let configurationId: String @@ -633,6 +719,63 @@ extension MigrationHub { } } + public struct ListMigrationTaskUpdatesRequest: AWSEncodableShape { + /// The maximum number of results to include in the response. If more results exist than the value that you specify here for MaxResults, the response will include a token that you can use to retrieve the next set of results. + public let maxResults: Int? + /// A unique identifier that references the migration task. Do not include sensitive data in this field. + public let migrationTaskName: String + /// If NextToken was returned by a previous call, there are more results available. The value of NextToken is a unique pagination token for each page. To retrieve the next page of results, specify the NextToken value that the previous call returned. Keep all other arguments unchanged. Each pagination token expires after 24 hours. Using an expired pagination token will return an HTTP 400 InvalidToken error. + public let nextToken: String? + /// The name of the progress-update stream, which is used for access control as well as a namespace for migration-task names that is implicitly linked to your AWS account. The progress-update stream must uniquely identify the migration tool as it is used for all updates made by the tool; however, it does not need to be unique for each AWS account because it is scoped to the AWS account. + public let progressUpdateStream: String + + @inlinable + public init(maxResults: Int? = nil, migrationTaskName: String, nextToken: String? 
= nil, progressUpdateStream: String) { + self.maxResults = maxResults + self.migrationTaskName = migrationTaskName + self.nextToken = nextToken + self.progressUpdateStream = progressUpdateStream + } + + public func validate(name: String) throws { + try self.validate(self.maxResults, name: "maxResults", parent: name, max: 100) + try self.validate(self.maxResults, name: "maxResults", parent: name, min: 1) + try self.validate(self.migrationTaskName, name: "migrationTaskName", parent: name, max: 256) + try self.validate(self.migrationTaskName, name: "migrationTaskName", parent: name, min: 1) + try self.validate(self.migrationTaskName, name: "migrationTaskName", parent: name, pattern: "^[^:|]+$") + try self.validate(self.nextToken, name: "nextToken", parent: name, max: 2048) + try self.validate(self.nextToken, name: "nextToken", parent: name, pattern: "^[a-zA-Z0-9\\/\\+\\=]{0,2048}$") + try self.validate(self.progressUpdateStream, name: "progressUpdateStream", parent: name, max: 50) + try self.validate(self.progressUpdateStream, name: "progressUpdateStream", parent: name, min: 1) + try self.validate(self.progressUpdateStream, name: "progressUpdateStream", parent: name, pattern: "^[^/:|\\000-\\037]+$") + } + + private enum CodingKeys: String, CodingKey { + case maxResults = "MaxResults" + case migrationTaskName = "MigrationTaskName" + case nextToken = "NextToken" + case progressUpdateStream = "ProgressUpdateStream" + } + } + + public struct ListMigrationTaskUpdatesResult: AWSDecodableShape { + /// The list of migration-task updates. + public let migrationTaskUpdateList: [MigrationTaskUpdate]? + /// If the response includes a NextToken value, that means that there are more results available. The value of NextToken is a unique pagination token for each page. To retrieve the next page of results, call this API again and specify this NextToken value in the request. Keep all other arguments unchanged. Each pagination token expires after 24 hours. 
Using an expired pagination token will return an HTTP 400 InvalidToken error. + public let nextToken: String? + + @inlinable + public init(migrationTaskUpdateList: [MigrationTaskUpdate]? = nil, nextToken: String? = nil) { + self.migrationTaskUpdateList = migrationTaskUpdateList + self.nextToken = nextToken + } + + private enum CodingKeys: String, CodingKey { + case migrationTaskUpdateList = "MigrationTaskUpdateList" + case nextToken = "NextToken" + } + } + public struct ListMigrationTasksRequest: AWSEncodableShape { /// Value to specify how many results are returned per page. public let maxResults: Int? @@ -726,6 +869,63 @@ extension MigrationHub { } } + public struct ListSourceResourcesRequest: AWSEncodableShape { + /// The maximum number of results to include in the response. If more results exist than the value that you specify here for MaxResults, the response will include a token that you can use to retrieve the next set of results. + public let maxResults: Int? + /// A unique identifier that references the migration task. Do not store confidential data in this field. + public let migrationTaskName: String + /// If NextToken was returned by a previous call, there are more results available. The value of NextToken is a unique pagination token for each page. To retrieve the next page of results, specify the NextToken value that the previous call returned. Keep all other arguments unchanged. Each pagination token expires after 24 hours. Using an expired pagination token will return an HTTP 400 InvalidToken error. + public let nextToken: String? + /// The name of the progress-update stream, which is used for access control as well as a namespace for migration-task names that is implicitly linked to your AWS account. The progress-update stream must uniquely identify the migration tool as it is used for all updates made by the tool; however, it does not need to be unique for each AWS account because it is scoped to the AWS account. 
+ public let progressUpdateStream: String + + @inlinable + public init(maxResults: Int? = nil, migrationTaskName: String, nextToken: String? = nil, progressUpdateStream: String) { + self.maxResults = maxResults + self.migrationTaskName = migrationTaskName + self.nextToken = nextToken + self.progressUpdateStream = progressUpdateStream + } + + public func validate(name: String) throws { + try self.validate(self.maxResults, name: "maxResults", parent: name, max: 10) + try self.validate(self.maxResults, name: "maxResults", parent: name, min: 1) + try self.validate(self.migrationTaskName, name: "migrationTaskName", parent: name, max: 256) + try self.validate(self.migrationTaskName, name: "migrationTaskName", parent: name, min: 1) + try self.validate(self.migrationTaskName, name: "migrationTaskName", parent: name, pattern: "^[^:|]+$") + try self.validate(self.nextToken, name: "nextToken", parent: name, max: 2048) + try self.validate(self.nextToken, name: "nextToken", parent: name, pattern: "^[a-zA-Z0-9\\/\\+\\=]{0,2048}$") + try self.validate(self.progressUpdateStream, name: "progressUpdateStream", parent: name, max: 50) + try self.validate(self.progressUpdateStream, name: "progressUpdateStream", parent: name, min: 1) + try self.validate(self.progressUpdateStream, name: "progressUpdateStream", parent: name, pattern: "^[^/:|\\000-\\037]+$") + } + + private enum CodingKeys: String, CodingKey { + case maxResults = "MaxResults" + case migrationTaskName = "MigrationTaskName" + case nextToken = "NextToken" + case progressUpdateStream = "ProgressUpdateStream" + } + } + + public struct ListSourceResourcesResult: AWSDecodableShape { + /// If the response includes a NextToken value, that means that there are more results available. The value of NextToken is a unique pagination token for each page. To retrieve the next page of results, call this API again and specify this NextToken value in the request. Keep all other arguments unchanged. 
Each pagination token expires after 24 hours. Using an expired pagination token will return an HTTP 400 InvalidToken error. + public let nextToken: String? + /// The list of source resources. + public let sourceResourceList: [SourceResource]? + + @inlinable + public init(nextToken: String? = nil, sourceResourceList: [SourceResource]? = nil) { + self.nextToken = nextToken + self.sourceResourceList = sourceResourceList + } + + private enum CodingKeys: String, CodingKey { + case nextToken = "NextToken" + case sourceResourceList = "SourceResourceList" + } + } + public struct MigrationTask: AWSDecodableShape { /// Unique identifier that references the migration task. Do not store personal data in this field. public let migrationTaskName: String? @@ -790,6 +990,27 @@ extension MigrationHub { } } + public struct MigrationTaskUpdate: AWSDecodableShape { + public let migrationTaskState: Task? + /// The timestamp for the update. + public let updateDateTime: Date? + /// The type of the update. + public let updateType: UpdateType? + + @inlinable + public init(migrationTaskState: Task? = nil, updateDateTime: Date? = nil, updateType: UpdateType? = nil) { + self.migrationTaskState = migrationTaskState + self.updateDateTime = updateDateTime + self.updateType = updateType + } + + private enum CodingKeys: String, CodingKey { + case migrationTaskState = "MigrationTaskState" + case updateDateTime = "UpdateDateTime" + case updateType = "UpdateType" + } + } + public struct NotifyApplicationStateRequest: AWSEncodableShape { /// The configurationId in Application Discovery Service that uniquely identifies the grouped application. public let applicationId: String @@ -896,8 +1117,7 @@ extension MigrationHub { public let migrationTaskName: String /// The name of the ProgressUpdateStream. public let progressUpdateStream: String - /// Information about the resource that is being migrated. This data will be used to map the task to a resource in the Application Discovery Service repository. 
Takes the object array of ResourceAttribute where the Type field is reserved for the following values: IPV4_ADDRESS | IPV6_ADDRESS | MAC_ADDRESS | FQDN | VM_MANAGER_ID | VM_MANAGED_OBJECT_REFERENCE | VM_NAME | VM_PATH | BIOS_ID | MOTHERBOARD_SERIAL_NUMBER where the identifying value can be a string up to 256 characters. - /// If any "VM" related value is set for a ResourceAttribute object, it is required that VM_MANAGER_ID, as a minimum, is always set. If VM_MANAGER_ID is not set, then all "VM" fields will be discarded and "VM" fields will not be used for matching the migration task to a server in Application Discovery Service repository. See the Example section below for a use case of specifying "VM" related values. If a server you are trying to match has multiple IP or MAC addresses, you should provide as many as you know in separate type/value pairs passed to the ResourceAttributeList parameter to maximize the chances of matching. + /// Information about the resource that is being migrated. This data will be used to map the task to a resource in the Application Discovery Service repository. Takes the object array of ResourceAttribute where the Type field is reserved for the following values: IPV4_ADDRESS | IPV6_ADDRESS | MAC_ADDRESS | FQDN | VM_MANAGER_ID | VM_MANAGED_OBJECT_REFERENCE | VM_NAME | VM_PATH | BIOS_ID | MOTHERBOARD_SERIAL_NUMBER where the identifying value can be a string up to 256 characters. If any "VM" related value is set for a ResourceAttribute object, it is required that VM_MANAGER_ID, as a minimum, is always set. If VM_MANAGER_ID is not set, then all "VM" fields will be discarded and "VM" fields will not be used for matching the migration task to a server in Application Discovery Service repository. See the Example section below for a use case of specifying "VM" related values. 
If a server you are trying to match has multiple IP or MAC addresses, you should provide as many as you know in separate type/value pairs passed to the ResourceAttributeList parameter to maximize the chances of matching. public let resourceAttributeList: [ResourceAttribute] @inlinable @@ -958,6 +1178,37 @@ extension MigrationHub { } } + public struct SourceResource: AWSEncodableShape & AWSDecodableShape { + /// A description that can be free-form text to record additional detail about the resource for clarity or later reference. + public let description: String? + /// This is the name that you want to use to identify the resource. If the resource is an AWS resource, we recommend that you set this parameter to the ARN of the resource. + public let name: String + /// A free-form description of the status of the resource. + public let statusDetail: String? + + @inlinable + public init(description: String? = nil, name: String, statusDetail: String? = nil) { + self.description = description + self.name = name + self.statusDetail = statusDetail + } + + public func validate(name: String) throws { + try self.validate(self.description, name: "description", parent: name, max: 500) + try self.validate(self.description, name: "description", parent: name, pattern: "^.{0,500}$") + try self.validate(self.name, name: "name", parent: name, max: 1600) + try self.validate(self.name, name: "name", parent: name, min: 1) + try self.validate(self.statusDetail, name: "statusDetail", parent: name, max: 2500) + try self.validate(self.statusDetail, name: "statusDetail", parent: name, pattern: "^.{0,2500}$") + } + + private enum CodingKeys: String, CodingKey { + case description = "Description" + case name = "Name" + case statusDetail = "StatusDetail" + } + } + public struct Task: AWSEncodableShape & AWSDecodableShape { /// Indication of the percentage completion of the task. public let progressPercent: Int? 
@@ -976,8 +1227,8 @@ extension MigrationHub { public func validate(name: String) throws { try self.validate(self.progressPercent, name: "progressPercent", parent: name, max: 100) try self.validate(self.progressPercent, name: "progressPercent", parent: name, min: 0) - try self.validate(self.statusDetail, name: "statusDetail", parent: name, max: 500) - try self.validate(self.statusDetail, name: "statusDetail", parent: name, pattern: "^.{0,500}$") + try self.validate(self.statusDetail, name: "statusDetail", parent: name, max: 2500) + try self.validate(self.statusDetail, name: "statusDetail", parent: name, pattern: "^.{0,2500}$") } private enum CodingKeys: String, CodingKey { diff --git a/Sources/Soto/Services/NetworkManager/NetworkManager_api.swift b/Sources/Soto/Services/NetworkManager/NetworkManager_api.swift index b030a7fe56..2484527155 100644 --- a/Sources/Soto/Services/NetworkManager/NetworkManager_api.swift +++ b/Sources/Soto/Services/NetworkManager/NetworkManager_api.swift @@ -3110,7 +3110,7 @@ public struct NetworkManager: AWSService { /// /// Parameters: /// - attachmentId: The ID of the Direct Connect gateway attachment for the updated edge locations. - /// - edgeLocations: One or more edge locations to update for the Direct Connect gateway attachment. The updated array of edge locations overwrites the previous array of locations. EdgeLocations is only used for Direct Connect gateway attachments. Do + /// - edgeLocations: One or more edge locations to update for the Direct Connect gateway attachment. The updated array of edge locations overwrites the previous array of locations. EdgeLocations is only used for Direct Connect gateway attachments. 
/// - logger: Logger use during operation @inlinable public func updateDirectConnectGatewayAttachment( diff --git a/Sources/Soto/Services/NetworkManager/NetworkManager_shapes.swift b/Sources/Soto/Services/NetworkManager/NetworkManager_shapes.swift index 31ae245738..4045ca2fdf 100644 --- a/Sources/Soto/Services/NetworkManager/NetworkManager_shapes.swift +++ b/Sources/Soto/Services/NetworkManager/NetworkManager_shapes.swift @@ -6792,7 +6792,7 @@ extension NetworkManager { public struct UpdateDirectConnectGatewayAttachmentRequest: AWSEncodableShape { /// The ID of the Direct Connect gateway attachment for the updated edge locations. public let attachmentId: String - /// One or more edge locations to update for the Direct Connect gateway attachment. The updated array of edge locations overwrites the previous array of locations. EdgeLocations is only used for Direct Connect gateway attachments. Do + /// One or more edge locations to update for the Direct Connect gateway attachment. The updated array of edge locations overwrites the previous array of locations. EdgeLocations is only used for Direct Connect gateway attachments. public let edgeLocations: [String]? 
@inlinable diff --git a/Sources/Soto/Services/Organizations/Organizations_api.swift b/Sources/Soto/Services/Organizations/Organizations_api.swift index 84bfe58b52..06a98f6c3d 100644 --- a/Sources/Soto/Services/Organizations/Organizations_api.swift +++ b/Sources/Soto/Services/Organizations/Organizations_api.swift @@ -81,6 +81,7 @@ public struct Organizations: AWSService { static var serviceEndpoints: [String: String] {[ "aws-cn-global": "organizations.cn-northwest-1.amazonaws.com.cn", "aws-global": "organizations.us-east-1.amazonaws.com", + "aws-iso-b-global": "organizations.us-isob-east-1.sc2s.sgov.gov", "aws-us-gov-global": "organizations.us-gov-west-1.amazonaws.com" ]} @@ -88,6 +89,7 @@ public struct Organizations: AWSService { static var partitionEndpoints: [AWSPartition: (endpoint: String, region: SotoCore.Region)] {[ .aws: (endpoint: "aws-global", region: .useast1), .awscn: (endpoint: "aws-cn-global", region: .cnnorthwest1), + .awsisob: (endpoint: "aws-iso-b-global", region: .usisobeast1), .awsusgov: (endpoint: "aws-us-gov-global", region: .usgovwest1) ]} diff --git a/Sources/Soto/Services/RDS/RDS_shapes.swift b/Sources/Soto/Services/RDS/RDS_shapes.swift index 9ab4cf951e..64b8daa7fa 100644 --- a/Sources/Soto/Services/RDS/RDS_shapes.swift +++ b/Sources/Soto/Services/RDS/RDS_shapes.swift @@ -72,6 +72,7 @@ extension RDS { } public enum ClientPasswordAuthType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case mysqlCachingSha2Password = "MYSQL_CACHING_SHA2_PASSWORD" case mysqlNativePassword = "MYSQL_NATIVE_PASSWORD" case postgresMd5 = "POSTGRES_MD5" case postgresScramSha256 = "POSTGRES_SCRAM_SHA_256" diff --git a/Sources/Soto/Services/Route53Domains/Route53Domains_shapes.swift b/Sources/Soto/Services/Route53Domains/Route53Domains_shapes.swift index c85a00733e..54c222223d 100644 --- a/Sources/Soto/Services/Route53Domains/Route53Domains_shapes.swift +++ b/Sources/Soto/Services/Route53Domains/Route53Domains_shapes.swift @@ -375,6 
+375,7 @@ extension Route53Domains { case releaseToGandi = "RELEASE_TO_GANDI" case removeDnssec = "REMOVE_DNSSEC" case renewDomain = "RENEW_DOMAIN" + case restoreDomain = "RESTORE_DOMAIN" case transferInDomain = "TRANSFER_IN_DOMAIN" case transferOnRenew = "TRANSFER_ON_RENEW" case transferOutDomain = "TRANSFER_OUT_DOMAIN" @@ -593,7 +594,7 @@ extension Route53Domains { public func validate(name: String) throws { try self.validate(self.domainName, name: "domainName", parent: name, max: 255) - try self.validate(self.idnLangCode, name: "idnLangCode", parent: name, max: 3) + try self.validate(self.idnLangCode, name: "idnLangCode", parent: name, pattern: "^|[A-Za-z]{2,3}$") } private enum CodingKeys: String, CodingKey { @@ -687,6 +688,7 @@ extension Route53Domains { public func validate(name: String) throws { try self.validate(self.currency, name: "currency", parent: name, max: 3) try self.validate(self.currency, name: "currency", parent: name, min: 3) + try self.validate(self.maxPrice, name: "maxPrice", parent: name, min: 0.0) } private enum CodingKeys: String, CodingKey { @@ -1682,7 +1684,7 @@ extension Route53Domains { try self.validate(self.marker, name: "marker", parent: name, max: 4096) try self.validate(self.maxItems, name: "maxItems", parent: name, max: 100) try self.validate(self.status, name: "status", parent: name, max: 5) - try self.validate(self.type, name: "type", parent: name, max: 20) + try self.validate(self.type, name: "type", parent: name, max: 21) } private enum CodingKeys: String, CodingKey { @@ -2013,7 +2015,7 @@ extension Route53Domains { try self.validate(self.domainName, name: "domainName", parent: name, max: 255) try self.validate(self.durationInYears, name: "durationInYears", parent: name, max: 10) try self.validate(self.durationInYears, name: "durationInYears", parent: name, min: 1) - try self.validate(self.idnLangCode, name: "idnLangCode", parent: name, max: 3) + try self.validate(self.idnLangCode, name: "idnLangCode", parent: name, pattern: 
"^|[A-Za-z]{2,3}$") try self.registrantContact.validate(name: "\(name).registrantContact") try self.techContact.validate(name: "\(name).techContact") } @@ -2358,7 +2360,7 @@ extension Route53Domains { try self.validate(self.domainName, name: "domainName", parent: name, max: 255) try self.validate(self.durationInYears, name: "durationInYears", parent: name, max: 10) try self.validate(self.durationInYears, name: "durationInYears", parent: name, min: 1) - try self.validate(self.idnLangCode, name: "idnLangCode", parent: name, max: 3) + try self.validate(self.idnLangCode, name: "idnLangCode", parent: name, pattern: "^|[A-Za-z]{2,3}$") try self.nameservers?.forEach { try $0.validate(name: "\(name).nameservers[]") } diff --git a/Sources/Soto/Services/SESv2/SESv2_api.swift b/Sources/Soto/Services/SESv2/SESv2_api.swift index 00ad948c8d..3ddaf38521 100644 --- a/Sources/Soto/Services/SESv2/SESv2_api.swift +++ b/Sources/Soto/Services/SESv2/SESv2_api.swift @@ -601,6 +601,41 @@ public struct SESv2: AWSService { return try await self.createImportJob(input, logger: logger) } + /// Creates a multi-region endpoint (global-endpoint). The primary region is going to be the AWS-Region where the operation is executed. The secondary region has to be provided in request's parameters. From the data flow standpoint there is no difference between primary and secondary regions - sending traffic will be split equally between the two. The primary region is the region where the resource has been created and where it can be managed. + @Sendable + @inlinable + public func createMultiRegionEndpoint(_ input: CreateMultiRegionEndpointRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateMultiRegionEndpointResponse { + try await self.client.execute( + operation: "CreateMultiRegionEndpoint", + path: "/v2/email/multi-region-endpoints", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Creates a multi-region endpoint (global-endpoint). 
The primary region is going to be the AWS-Region where the operation is executed. The secondary region has to be provided in request's parameters. From the data flow standpoint there is no difference between primary and secondary regions - sending traffic will be split equally between the two. The primary region is the region where the resource has been created and where it can be managed. + /// + /// Parameters: + /// - details: Contains details of a multi-region endpoint (global-endpoint) being created. + /// - endpointName: The name of the multi-region endpoint (global-endpoint). + /// - tags: An array of objects that define the tags (keys and values) to associate with the multi-region endpoint (global-endpoint). + /// - logger: Logger use during operation + @inlinable + public func createMultiRegionEndpoint( + details: Details, + endpointName: String, + tags: [Tag]? = nil, + logger: Logger = AWSClient.loggingDisabled + ) async throws -> CreateMultiRegionEndpointResponse { + let input = CreateMultiRegionEndpointRequest( + details: details, + endpointName: endpointName, + tags: tags + ) + return try await self.createMultiRegionEndpoint(input, logger: logger) + } + /// Delete an existing configuration set. Configuration sets are groups of rules that you can apply to the emails you send. You apply a configuration set to an email by including a reference to the configuration set in the headers of the email. When you apply a configuration set to an email, all of the rules in that configuration set are applied to the email. @Sendable @inlinable @@ -871,6 +906,35 @@ public struct SESv2: AWSService { return try await self.deleteEmailTemplate(input, logger: logger) } + /// Deletes a multi-region endpoint (global-endpoint). Only multi-region endpoints (global-endpoints) whose primary region is the AWS-Region where operation is executed can be deleted. 
+ @Sendable + @inlinable + public func deleteMultiRegionEndpoint(_ input: DeleteMultiRegionEndpointRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DeleteMultiRegionEndpointResponse { + try await self.client.execute( + operation: "DeleteMultiRegionEndpoint", + path: "/v2/email/multi-region-endpoints/{EndpointName}", + httpMethod: .DELETE, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Deletes a multi-region endpoint (global-endpoint). Only multi-region endpoints (global-endpoints) whose primary region is the AWS-Region where operation is executed can be deleted. + /// + /// Parameters: + /// - endpointName: The name of the multi-region endpoint (global-endpoint) to be deleted. + /// - logger: Logger use during operation + @inlinable + public func deleteMultiRegionEndpoint( + endpointName: String, + logger: Logger = AWSClient.loggingDisabled + ) async throws -> DeleteMultiRegionEndpointResponse { + let input = DeleteMultiRegionEndpointRequest( + endpointName: endpointName + ) + return try await self.deleteMultiRegionEndpoint(input, logger: logger) + } + /// Removes an email address from the suppression list for your account. @Sendable @inlinable @@ -1489,6 +1553,35 @@ public struct SESv2: AWSService { return try await self.getMessageInsights(input, logger: logger) } + /// Displays the multi-region endpoint (global-endpoint) configuration. Only multi-region endpoints (global-endpoints) whose primary region is the AWS-Region where operation is executed can be displayed. 
+ @Sendable + @inlinable + public func getMultiRegionEndpoint(_ input: GetMultiRegionEndpointRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> GetMultiRegionEndpointResponse { + try await self.client.execute( + operation: "GetMultiRegionEndpoint", + path: "/v2/email/multi-region-endpoints/{EndpointName}", + httpMethod: .GET, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Displays the multi-region endpoint (global-endpoint) configuration. Only multi-region endpoints (global-endpoints) whose primary region is the AWS-Region where operation is executed can be displayed. + /// + /// Parameters: + /// - endpointName: The name of the multi-region endpoint (global-endpoint). + /// - logger: Logger use during operation + @inlinable + public func getMultiRegionEndpoint( + endpointName: String, + logger: Logger = AWSClient.loggingDisabled + ) async throws -> GetMultiRegionEndpointResponse { + let input = GetMultiRegionEndpointRequest( + endpointName: endpointName + ) + return try await self.getMultiRegionEndpoint(input, logger: logger) + } + /// Retrieves information about a specific email address that's on the suppression list for your account. @Sendable @inlinable @@ -1894,6 +1987,38 @@ public struct SESv2: AWSService { return try await self.listImportJobs(input, logger: logger) } + /// List the multi-region endpoints (global-endpoints). Only multi-region endpoints (global-endpoints) whose primary region is the AWS-Region where operation is executed will be listed. + @Sendable + @inlinable + public func listMultiRegionEndpoints(_ input: ListMultiRegionEndpointsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListMultiRegionEndpointsResponse { + try await self.client.execute( + operation: "ListMultiRegionEndpoints", + path: "/v2/email/multi-region-endpoints", + httpMethod: .GET, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// List the multi-region endpoints (global-endpoints). 
Only multi-region endpoints (global-endpoints) whose primary region is the AWS-Region where operation is executed will be listed. + /// + /// Parameters: + /// - nextToken: A token returned from a previous call to ListMultiRegionEndpoints to indicate the position in the list of multi-region endpoints (global-endpoints). + /// - pageSize: The number of results to show in a single call to ListMultiRegionEndpoints. If the number of results is larger than the number you specified in this parameter, the response includes a NextToken element that you can use to retrieve the next page of results. + /// - logger: Logger use during operation + @inlinable + public func listMultiRegionEndpoints( + nextToken: String? = nil, + pageSize: Int? = nil, + logger: Logger = AWSClient.loggingDisabled + ) async throws -> ListMultiRegionEndpointsResponse { + let input = ListMultiRegionEndpointsRequest( + nextToken: nextToken, + pageSize: pageSize + ) + return try await self.listMultiRegionEndpoints(input, logger: logger) + } + /// Lists the recommendations present in your Amazon SES account in the current Amazon Web Services Region. You can execute this operation no more than once per second. @Sendable @inlinable @@ -2704,6 +2829,7 @@ public struct SESv2: AWSService { /// - configurationSetName: The name of the configuration set to use when sending the email. /// - defaultContent: An object that contains the body of the message. You can specify a template message. /// - defaultEmailTags: A list of tags, in the form of name/value pairs, to apply to an email that you send using the SendEmail operation. Tags correspond to characteristics of the email that you define, so that you can publish email sending events. + /// - endpointId: The ID of the multi-region endpoint (global-endpoint). /// - feedbackForwardingEmailAddress: The address that you want bounce and complaint notifications to be sent to. 
/// - feedbackForwardingEmailAddressIdentityArn: This parameter is used only for sending authorization. It is the ARN of the identity that is associated with the sending authorization policy that permits you to use the email address specified in the FeedbackForwardingEmailAddress parameter. For example, if the owner of example.com (which has ARN arn:aws:ses:us-east-1:123456789012:identity/example.com) attaches a policy to it that authorizes you to use feedback@example.com, then you would specify the FeedbackForwardingEmailAddressIdentityArn to be arn:aws:ses:us-east-1:123456789012:identity/example.com, and the FeedbackForwardingEmailAddress to be feedback@example.com. For more information about sending authorization, see the Amazon SES Developer Guide. /// - fromEmailAddress: The email address to use as the "From" address for the email. The address that you specify has to be verified. @@ -2716,6 +2842,7 @@ public struct SESv2: AWSService { configurationSetName: String? = nil, defaultContent: BulkEmailContent, defaultEmailTags: [MessageTag]? = nil, + endpointId: String? = nil, feedbackForwardingEmailAddress: String? = nil, feedbackForwardingEmailAddressIdentityArn: String? = nil, fromEmailAddress: String? = nil, @@ -2728,6 +2855,7 @@ public struct SESv2: AWSService { configurationSetName: configurationSetName, defaultContent: defaultContent, defaultEmailTags: defaultEmailTags, + endpointId: endpointId, feedbackForwardingEmailAddress: feedbackForwardingEmailAddress, feedbackForwardingEmailAddressIdentityArn: feedbackForwardingEmailAddressIdentityArn, fromEmailAddress: fromEmailAddress, @@ -2792,6 +2920,7 @@ public struct SESv2: AWSService { /// - content: An object that contains the body of the message. You can send either a Simple message, Raw message, or a Templated message. /// - destination: An object that contains the recipients of the email message. 
/// - emailTags: A list of tags, in the form of name/value pairs, to apply to an email that you send using the SendEmail operation. Tags correspond to characteristics of the email that you define, so that you can publish email sending events. + /// - endpointId: The ID of the multi-region endpoint (global-endpoint). /// - feedbackForwardingEmailAddress: The address that you want bounce and complaint notifications to be sent to. /// - feedbackForwardingEmailAddressIdentityArn: This parameter is used only for sending authorization. It is the ARN of the identity that is associated with the sending authorization policy that permits you to use the email address specified in the FeedbackForwardingEmailAddress parameter. For example, if the owner of example.com (which has ARN arn:aws:ses:us-east-1:123456789012:identity/example.com) attaches a policy to it that authorizes you to use feedback@example.com, then you would specify the FeedbackForwardingEmailAddressIdentityArn to be arn:aws:ses:us-east-1:123456789012:identity/example.com, and the FeedbackForwardingEmailAddress to be feedback@example.com. For more information about sending authorization, see the Amazon SES Developer Guide. /// - fromEmailAddress: The email address to use as the "From" address for the email. The address that you specify has to be verified. @@ -2805,6 +2934,7 @@ public struct SESv2: AWSService { content: EmailContent, destination: Destination? = nil, emailTags: [MessageTag]? = nil, + endpointId: String? = nil, feedbackForwardingEmailAddress: String? = nil, feedbackForwardingEmailAddressIdentityArn: String? = nil, fromEmailAddress: String? 
= nil, @@ -2818,6 +2948,7 @@ public struct SESv2: AWSService { content: content, destination: destination, emailTags: emailTags, + endpointId: endpointId, feedbackForwardingEmailAddress: feedbackForwardingEmailAddress, feedbackForwardingEmailAddressIdentityArn: feedbackForwardingEmailAddressIdentityArn, fromEmailAddress: fromEmailAddress, @@ -3595,6 +3726,40 @@ extension SESv2 { return self.listImportJobsPaginator(input, logger: logger) } + /// Return PaginatorSequence for operation ``listMultiRegionEndpoints(_:logger:)``. + /// + /// - Parameters: + /// - input: Input for operation + /// - logger: Logger used for logging + @inlinable + public func listMultiRegionEndpointsPaginator( + _ input: ListMultiRegionEndpointsRequest, + logger: Logger = AWSClient.loggingDisabled + ) -> AWSClient.PaginatorSequence { + return .init( + input: input, + command: self.listMultiRegionEndpoints, + inputKey: \ListMultiRegionEndpointsRequest.nextToken, + outputKey: \ListMultiRegionEndpointsResponse.nextToken, + logger: logger + ) + } + /// Return PaginatorSequence for operation ``listMultiRegionEndpoints(_:logger:)``. + /// + /// - Parameters: + /// - pageSize: The number of results to show in a single call to ListMultiRegionEndpoints. If the number of results is larger than the number you specified in this parameter, the response includes a NextToken element that you can use to retrieve the next page of results. + /// - logger: Logger used for logging + @inlinable + public func listMultiRegionEndpointsPaginator( + pageSize: Int? = nil, + logger: Logger = AWSClient.loggingDisabled + ) -> AWSClient.PaginatorSequence { + let input = ListMultiRegionEndpointsRequest( + pageSize: pageSize + ) + return self.listMultiRegionEndpointsPaginator(input, logger: logger) + } + /// Return PaginatorSequence for operation ``listRecommendations(_:logger:)``. 
/// /// - Parameters: @@ -3805,6 +3970,16 @@ extension SESv2.ListImportJobsRequest: AWSPaginateToken { } } +extension SESv2.ListMultiRegionEndpointsRequest: AWSPaginateToken { + @inlinable + public func usingPaginationToken(_ token: String) -> SESv2.ListMultiRegionEndpointsRequest { + return .init( + nextToken: token, + pageSize: self.pageSize + ) + } +} + extension SESv2.ListRecommendationsRequest: AWSPaginateToken { @inlinable public func usingPaginationToken(_ token: String) -> SESv2.ListRecommendationsRequest { diff --git a/Sources/Soto/Services/SESv2/SESv2_shapes.swift b/Sources/Soto/Services/SESv2/SESv2_shapes.swift index 18965a97b0..dc4f347072 100644 --- a/Sources/Soto/Services/SESv2/SESv2_shapes.swift +++ b/Sources/Soto/Services/SESv2/SESv2_shapes.swift @@ -107,6 +107,28 @@ extension SESv2 { public enum DkimSigningAttributesOrigin: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case awsSes = "AWS_SES" + case awsSesAfSouth1 = "AWS_SES_AF_SOUTH_1" + case awsSesApNortheast1 = "AWS_SES_AP_NORTHEAST_1" + case awsSesApNortheast2 = "AWS_SES_AP_NORTHEAST_2" + case awsSesApNortheast3 = "AWS_SES_AP_NORTHEAST_3" + case awsSesApSouth1 = "AWS_SES_AP_SOUTH_1" + case awsSesApSoutheast1 = "AWS_SES_AP_SOUTHEAST_1" + case awsSesApSoutheast2 = "AWS_SES_AP_SOUTHEAST_2" + case awsSesApSoutheast3 = "AWS_SES_AP_SOUTHEAST_3" + case awsSesCaCentral1 = "AWS_SES_CA_CENTRAL_1" + case awsSesEuCentral1 = "AWS_SES_EU_CENTRAL_1" + case awsSesEuNorth1 = "AWS_SES_EU_NORTH_1" + case awsSesEuSouth1 = "AWS_SES_EU_SOUTH_1" + case awsSesEuWest1 = "AWS_SES_EU_WEST_1" + case awsSesEuWest2 = "AWS_SES_EU_WEST_2" + case awsSesEuWest3 = "AWS_SES_EU_WEST_3" + case awsSesIlCentral1 = "AWS_SES_IL_CENTRAL_1" + case awsSesMeSouth1 = "AWS_SES_ME_SOUTH_1" + case awsSesSaEast1 = "AWS_SES_SA_EAST_1" + case awsSesUsEast1 = "AWS_SES_US_EAST_1" + case awsSesUsEast2 = "AWS_SES_US_EAST_2" + case awsSesUsWest1 = "AWS_SES_US_WEST_1" + case awsSesUsWest2 = "AWS_SES_US_WEST_2" case 
external = "EXTERNAL" public var description: String { return self.rawValue } } @@ -281,6 +303,14 @@ extension SESv2 { public var description: String { return self.rawValue } } + public enum Status: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case creating = "CREATING" + case deleting = "DELETING" + case failed = "FAILED" + case ready = "READY" + public var description: String { return self.rawValue } + } + public enum SubscriptionStatus: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case optIn = "OPT_IN" case optOut = "OPT_OUT" @@ -309,6 +339,11 @@ extension SESv2 { case dnsServerError = "DNS_SERVER_ERROR" case hostNotFound = "HOST_NOT_FOUND" case invalidValue = "INVALID_VALUE" + case replicationAccessDenied = "REPLICATION_ACCESS_DENIED" + case replicationPrimaryByoDkimNotSupported = "REPLICATION_PRIMARY_BYO_DKIM_NOT_SUPPORTED" + case replicationPrimaryInvalidRegion = "REPLICATION_PRIMARY_INVALID_REGION" + case replicationPrimaryNotFound = "REPLICATION_PRIMARY_NOT_FOUND" + case replicationReplicaAsPrimaryNotSupported = "REPLICATION_REPLICA_AS_PRIMARY_NOT_SUPPORTED" case serviceError = "SERVICE_ERROR" case typeNotFound = "TYPE_NOT_FOUND" public var description: String { return self.rawValue } @@ -1208,6 +1243,52 @@ extension SESv2 { } } + public struct CreateMultiRegionEndpointRequest: AWSEncodableShape { + /// Contains details of a multi-region endpoint (global-endpoint) being created. + public let details: Details + /// The name of the multi-region endpoint (global-endpoint). + public let endpointName: String + /// An array of objects that define the tags (keys and values) to associate with the multi-region endpoint (global-endpoint). + public let tags: [Tag]? + + @inlinable + public init(details: Details, endpointName: String, tags: [Tag]? 
= nil) { + self.details = details + self.endpointName = endpointName + self.tags = tags + } + + public func validate(name: String) throws { + try self.validate(self.endpointName, name: "endpointName", parent: name, max: 64) + try self.validate(self.endpointName, name: "endpointName", parent: name, min: 1) + try self.validate(self.endpointName, name: "endpointName", parent: name, pattern: "^[\\w\\-_]+$") + } + + private enum CodingKeys: String, CodingKey { + case details = "Details" + case endpointName = "EndpointName" + case tags = "Tags" + } + } + + public struct CreateMultiRegionEndpointResponse: AWSDecodableShape { + /// The ID of the multi-region endpoint (global-endpoint). + public let endpointId: String? + /// A status of the multi-region endpoint (global-endpoint) right after the create request. CREATING – The resource is being provisioned. READY – The resource is ready to use. FAILED – The resource failed to be provisioned. DELETING – The resource is being deleted as requested. + public let status: Status? + + @inlinable + public init(endpointId: String? = nil, status: Status? = nil) { + self.endpointId = endpointId + self.status = status + } + + private enum CodingKeys: String, CodingKey { + case endpointId = "EndpointId" + case status = "Status" + } + } + public struct CustomVerificationEmailTemplateMetadata: AWSDecodableShape { /// The URL that the recipient of the verification email is sent to if his or her address is not successfully verified. public let failureRedirectionURL: String? @@ -1560,6 +1641,44 @@ extension SESv2 { public init() {} } + public struct DeleteMultiRegionEndpointRequest: AWSEncodableShape { + /// The name of the multi-region endpoint (global-endpoint) to be deleted. + public let endpointName: String + + @inlinable + public init(endpointName: String) { + self.endpointName = endpointName + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! 
RequestEncodingContainer + _ = encoder.container(keyedBy: CodingKeys.self) + request.encodePath(self.endpointName, key: "EndpointName") + } + + public func validate(name: String) throws { + try self.validate(self.endpointName, name: "endpointName", parent: name, max: 64) + try self.validate(self.endpointName, name: "endpointName", parent: name, min: 1) + try self.validate(self.endpointName, name: "endpointName", parent: name, pattern: "^[\\w\\-_]+$") + } + + private enum CodingKeys: CodingKey {} + } + + public struct DeleteMultiRegionEndpointResponse: AWSDecodableShape { + /// A status of the multi-region endpoint (global-endpoint) right after the delete request. CREATING – The resource is being provisioned. READY – The resource is ready to use. FAILED – The resource failed to be provisioned. DELETING – The resource is being deleted as requested. + public let status: Status? + + @inlinable + public init(status: Status? = nil) { + self.status = status + } + + private enum CodingKeys: String, CodingKey { + case status = "Status" + } + } + public struct DeleteSuppressedDestinationRequest: AWSEncodableShape { /// The suppressed email destination to remove from the account suppression list. public let emailAddress: String @@ -1665,6 +1784,20 @@ extension SESv2 { } } + public struct Details: AWSEncodableShape { + /// A list of route configuration details. Must contain exactly one route configuration. + public let routesDetails: [RouteDetails] + + @inlinable + public init(routesDetails: [RouteDetails]) { + self.routesDetails = routesDetails + } + + private enum CodingKeys: String, CodingKey { + case routesDetails = "RoutesDetails" + } + } + public struct DkimAttributes: AWSDecodableShape { /// [Easy DKIM] The key length of the DKIM key pair in use. public let currentSigningKeyLength: DkimSigningKeyLength? @@ -1672,7 +1805,7 @@ extension SESv2 { public let lastKeyGenerationTimestamp: Date? /// [Easy DKIM] The key length of the future DKIM key pair to be generated. 
This can be changed at most once per day. public let nextSigningKeyLength: DkimSigningKeyLength? - /// A string that indicates how DKIM was configured for the identity. These are the possible values: AWS_SES – Indicates that DKIM was configured for the identity by using Easy DKIM. EXTERNAL – Indicates that DKIM was configured for the identity by using Bring Your Own DKIM (BYODKIM). + /// A string that indicates how DKIM was configured for the identity. These are the possible values: AWS_SES – Indicates that DKIM was configured for the identity by using Easy DKIM. EXTERNAL – Indicates that DKIM was configured for the identity by using Bring Your Own DKIM (BYODKIM). AWS_SES_AF_SOUTH_1 – Indicates that DKIM was configured for the identity by replicating signing attributes from a parent identity in Africa (Cape Town) region using Deterministic Easy-DKIM (DEED). AWS_SES_EU_NORTH_1 – Indicates that DKIM was configured for the identity by replicating signing attributes from a parent identity in Europe (Stockholm) region using Deterministic Easy-DKIM (DEED). AWS_SES_AP_SOUTH_1 – Indicates that DKIM was configured for the identity by replicating signing attributes from a parent identity in Asia Pacific (Mumbai) region using Deterministic Easy-DKIM (DEED). AWS_SES_EU_WEST_3 – Indicates that DKIM was configured for the identity by replicating signing attributes from a parent identity in Europe (Paris) region using Deterministic Easy-DKIM (DEED). AWS_SES_EU_WEST_2 – Indicates that DKIM was configured for the identity by replicating signing attributes from a parent identity in Europe (London) region using Deterministic Easy-DKIM (DEED). AWS_SES_EU_SOUTH_1 – Indicates that DKIM was configured for the identity by replicating signing attributes from a parent identity in Europe (Milan) region using Deterministic Easy-DKIM (DEED). 
AWS_SES_EU_WEST_1 – Indicates that DKIM was configured for the identity by replicating signing attributes from a parent identity in Europe (Ireland) region using Deterministic Easy-DKIM (DEED). AWS_SES_AP_NORTHEAST_3 – Indicates that DKIM was configured for the identity by replicating signing attributes from a parent identity in Asia Pacific (Osaka) region using Deterministic Easy-DKIM (DEED). AWS_SES_AP_NORTHEAST_2 – Indicates that DKIM was configured for the identity by replicating signing attributes from a parent identity in Asia Pacific (Seoul) region using Deterministic Easy-DKIM (DEED). AWS_SES_ME_SOUTH_1 – Indicates that DKIM was configured for the identity by replicating signing attributes from a parent identity in Middle East (Bahrain) region using Deterministic Easy-DKIM (DEED). AWS_SES_AP_NORTHEAST_1 – Indicates that DKIM was configured for the identity by replicating signing attributes from a parent identity in Asia Pacific (Tokyo) region using Deterministic Easy-DKIM (DEED). AWS_SES_IL_CENTRAL_1 – Indicates that DKIM was configured for the identity by replicating signing attributes from a parent identity in Israel (Tel Aviv) region using Deterministic Easy-DKIM (DEED). AWS_SES_SA_EAST_1 – Indicates that DKIM was configured for the identity by replicating signing attributes from a parent identity in South America (São Paulo) region using Deterministic Easy-DKIM (DEED). AWS_SES_CA_CENTRAL_1 – Indicates that DKIM was configured for the identity by replicating signing attributes from a parent identity in Canada (Central) region using Deterministic Easy-DKIM (DEED). AWS_SES_AP_SOUTHEAST_1 – Indicates that DKIM was configured for the identity by replicating signing attributes from a parent identity in Asia Pacific (Singapore) region using Deterministic Easy-DKIM (DEED). 
AWS_SES_AP_SOUTHEAST_2 – Indicates that DKIM was configured for the identity by replicating signing attributes from a parent identity in Asia Pacific (Sydney) region using Deterministic Easy-DKIM (DEED). AWS_SES_AP_SOUTHEAST_3 – Indicates that DKIM was configured for the identity by replicating signing attributes from a parent identity in Asia Pacific (Jakarta) region using Deterministic Easy-DKIM (DEED). AWS_SES_EU_CENTRAL_1 – Indicates that DKIM was configured for the identity by replicating signing attributes from a parent identity in Europe (Frankfurt) region using Deterministic Easy-DKIM (DEED). AWS_SES_US_EAST_1 – Indicates that DKIM was configured for the identity by replicating signing attributes from a parent identity in US East (N. Virginia) region using Deterministic Easy-DKIM (DEED). AWS_SES_US_EAST_2 – Indicates that DKIM was configured for the identity by replicating signing attributes from a parent identity in US East (Ohio) region using Deterministic Easy-DKIM (DEED). AWS_SES_US_WEST_1 – Indicates that DKIM was configured for the identity by replicating signing attributes from a parent identity in US West (N. California) region using Deterministic Easy-DKIM (DEED). AWS_SES_US_WEST_2 – Indicates that DKIM was configured for the identity by replicating signing attributes from a parent identity in US West (Oregon) region using Deterministic Easy-DKIM (DEED). public let signingAttributesOrigin: DkimSigningAttributesOrigin? /// If the value is true, then the messages that you send from the identity are signed using DKIM. If the value is false, then the messages that you send from the identity aren't DKIM-signed. public let signingEnabled: Bool? 
@@ -1704,6 +1837,8 @@ extension SESv2 { } public struct DkimSigningAttributes: AWSEncodableShape { + /// The attribute to use for configuring DKIM for the identity depends on the operation: For PutEmailIdentityDkimSigningAttributes: None of the values are allowed - use the SigningAttributesOrigin parameter instead For CreateEmailIdentity when replicating a parent identity's DKIM configuration: Allowed values: All values except AWS_SES and EXTERNAL AWS_SES – Configure DKIM for the identity by using Easy DKIM. EXTERNAL – Configure DKIM for the identity by using Bring Your Own DKIM (BYODKIM). AWS_SES_AF_SOUTH_1 – Configure DKIM for the identity by replicating from a parent identity in Africa (Cape Town) region using Deterministic Easy-DKIM (DEED). AWS_SES_EU_NORTH_1 – Configure DKIM for the identity by replicating from a parent identity in Europe (Stockholm) region using Deterministic Easy-DKIM (DEED). AWS_SES_AP_SOUTH_1 – Configure DKIM for the identity by replicating from a parent identity in Asia Pacific (Mumbai) region using Deterministic Easy-DKIM (DEED). AWS_SES_EU_WEST_3 – Configure DKIM for the identity by replicating from a parent identity in Europe (Paris) region using Deterministic Easy-DKIM (DEED). AWS_SES_EU_WEST_2 – Configure DKIM for the identity by replicating from a parent identity in Europe (London) region using Deterministic Easy-DKIM (DEED). AWS_SES_EU_SOUTH_1 – Configure DKIM for the identity by replicating from a parent identity in Europe (Milan) region using Deterministic Easy-DKIM (DEED). AWS_SES_EU_WEST_1 – Configure DKIM for the identity by replicating from a parent identity in Europe (Ireland) region using Deterministic Easy-DKIM (DEED). AWS_SES_AP_NORTHEAST_3 – Configure DKIM for the identity by replicating from a parent identity in Asia Pacific (Osaka) region using Deterministic Easy-DKIM (DEED). 
AWS_SES_AP_NORTHEAST_2 – Configure DKIM for the identity by replicating from a parent identity in Asia Pacific (Seoul) region using Deterministic Easy-DKIM (DEED). AWS_SES_ME_SOUTH_1 – Configure DKIM for the identity by replicating from a parent identity in Middle East (Bahrain) region using Deterministic Easy-DKIM (DEED). AWS_SES_AP_NORTHEAST_1 – Configure DKIM for the identity by replicating from a parent identity in Asia Pacific (Tokyo) region using Deterministic Easy-DKIM (DEED). AWS_SES_IL_CENTRAL_1 – Configure DKIM for the identity by replicating from a parent identity in Israel (Tel Aviv) region using Deterministic Easy-DKIM (DEED). AWS_SES_SA_EAST_1 – Configure DKIM for the identity by replicating from a parent identity in South America (São Paulo) region using Deterministic Easy-DKIM (DEED). AWS_SES_CA_CENTRAL_1 – Configure DKIM for the identity by replicating from a parent identity in Canada (Central) region using Deterministic Easy-DKIM (DEED). AWS_SES_AP_SOUTHEAST_1 – Configure DKIM for the identity by replicating from a parent identity in Asia Pacific (Singapore) region using Deterministic Easy-DKIM (DEED). AWS_SES_AP_SOUTHEAST_2 – Configure DKIM for the identity by replicating from a parent identity in Asia Pacific (Sydney) region using Deterministic Easy-DKIM (DEED). AWS_SES_AP_SOUTHEAST_3 – Configure DKIM for the identity by replicating from a parent identity in Asia Pacific (Jakarta) region using Deterministic Easy-DKIM (DEED). AWS_SES_EU_CENTRAL_1 – Configure DKIM for the identity by replicating from a parent identity in Europe (Frankfurt) region using Deterministic Easy-DKIM (DEED). AWS_SES_US_EAST_1 – Configure DKIM for the identity by replicating from a parent identity in US East (N. Virginia) region using Deterministic Easy-DKIM (DEED). AWS_SES_US_EAST_2 – Configure DKIM for the identity by replicating from a parent identity in US East (Ohio) region using Deterministic Easy-DKIM (DEED). 
AWS_SES_US_WEST_1 – Configure DKIM for the identity by replicating from a parent identity in US West (N. California) region using Deterministic Easy-DKIM (DEED). AWS_SES_US_WEST_2 – Configure DKIM for the identity by replicating from a parent identity in US West (Oregon) region using Deterministic Easy-DKIM (DEED). + public let domainSigningAttributesOrigin: DkimSigningAttributesOrigin? /// [Bring Your Own DKIM] A private key that's used to generate a DKIM signature. The private key must use 1024 or 2048-bit RSA encryption, and must be encoded using base64 encoding. public let domainSigningPrivateKey: String? /// [Bring Your Own DKIM] A string that's used to identify a public key in the DNS configuration for a domain. @@ -1712,7 +1847,8 @@ extension SESv2 { public let nextSigningKeyLength: DkimSigningKeyLength? @inlinable - public init(domainSigningPrivateKey: String? = nil, domainSigningSelector: String? = nil, nextSigningKeyLength: DkimSigningKeyLength? = nil) { + public init(domainSigningAttributesOrigin: DkimSigningAttributesOrigin? = nil, domainSigningPrivateKey: String? = nil, domainSigningSelector: String? = nil, nextSigningKeyLength: DkimSigningKeyLength? = nil) { + self.domainSigningAttributesOrigin = domainSigningAttributesOrigin self.domainSigningPrivateKey = domainSigningPrivateKey self.domainSigningSelector = domainSigningSelector self.nextSigningKeyLength = nextSigningKeyLength @@ -1728,6 +1864,7 @@ extension SESv2 { } private enum CodingKeys: String, CodingKey { + case domainSigningAttributesOrigin = "DomainSigningAttributesOrigin" case domainSigningPrivateKey = "DomainSigningPrivateKey" case domainSigningSelector = "DomainSigningSelector" case nextSigningKeyLength = "NextSigningKeyLength" @@ -3122,6 +3259,64 @@ extension SESv2 { } } + public struct GetMultiRegionEndpointRequest: AWSEncodableShape { + /// The name of the multi-region endpoint (global-endpoint). 
+ public let endpointName: String + + @inlinable + public init(endpointName: String) { + self.endpointName = endpointName + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer + _ = encoder.container(keyedBy: CodingKeys.self) + request.encodePath(self.endpointName, key: "EndpointName") + } + + public func validate(name: String) throws { + try self.validate(self.endpointName, name: "endpointName", parent: name, max: 64) + try self.validate(self.endpointName, name: "endpointName", parent: name, min: 1) + try self.validate(self.endpointName, name: "endpointName", parent: name, pattern: "^[\\w\\-_]+$") + } + + private enum CodingKeys: CodingKey {} + } + + public struct GetMultiRegionEndpointResponse: AWSDecodableShape { + /// The time stamp of when the multi-region endpoint (global-endpoint) was created. + public let createdTimestamp: Date? + /// The ID of the multi-region endpoint (global-endpoint). + public let endpointId: String? + /// The name of the multi-region endpoint (global-endpoint). + public let endpointName: String? + /// The time stamp of when the multi-region endpoint (global-endpoint) was last updated. + public let lastUpdatedTimestamp: Date? + /// Contains routes information for the multi-region endpoint (global-endpoint). + public let routes: [Route]? + /// The status of the multi-region endpoint (global-endpoint). CREATING – The resource is being provisioned. READY – The resource is ready to use. FAILED – The resource failed to be provisioned. DELETING – The resource is being deleted as requested. + public let status: Status? + + @inlinable + public init(createdTimestamp: Date? = nil, endpointId: String? = nil, endpointName: String? = nil, lastUpdatedTimestamp: Date? = nil, routes: [Route]? = nil, status: Status? 
= nil) { + self.createdTimestamp = createdTimestamp + self.endpointId = endpointId + self.endpointName = endpointName + self.lastUpdatedTimestamp = lastUpdatedTimestamp + self.routes = routes + self.status = status + } + + private enum CodingKeys: String, CodingKey { + case createdTimestamp = "CreatedTimestamp" + case endpointId = "EndpointId" + case endpointName = "EndpointName" + case lastUpdatedTimestamp = "LastUpdatedTimestamp" + case routes = "Routes" + case status = "Status" + } + } + public struct GetSuppressedDestinationRequest: AWSEncodableShape { /// The email address that's on the account suppression list. public let emailAddress: String @@ -3859,6 +4054,54 @@ extension SESv2 { } } + public struct ListMultiRegionEndpointsRequest: AWSEncodableShape { + /// A token returned from a previous call to ListMultiRegionEndpoints to indicate the position in the list of multi-region endpoints (global-endpoints). + public let nextToken: String? + /// The number of results to show in a single call to ListMultiRegionEndpoints. If the number of results is larger than the number you specified in this parameter, the response includes a NextToken element that you can use to retrieve the next page of results. + public let pageSize: Int? + + @inlinable + public init(nextToken: String? = nil, pageSize: Int? = nil) { + self.nextToken = nextToken + self.pageSize = pageSize + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! 
RequestEncodingContainer + _ = encoder.container(keyedBy: CodingKeys.self) + request.encodeQuery(self.nextToken, key: "NextToken") + request.encodeQuery(self.pageSize, key: "PageSize") + } + + public func validate(name: String) throws { + try self.validate(self.nextToken, name: "nextToken", parent: name, max: 5000) + try self.validate(self.nextToken, name: "nextToken", parent: name, min: 1) + try self.validate(self.nextToken, name: "nextToken", parent: name, pattern: "^^([A-Za-z0-9+/]{4})*([A-Za-z0-9+/]{2}==|[A-Za-z0-9+/]{3}=)?$") + try self.validate(self.pageSize, name: "pageSize", parent: name, max: 1000) + try self.validate(self.pageSize, name: "pageSize", parent: name, min: 1) + } + + private enum CodingKeys: CodingKey {} + } + + public struct ListMultiRegionEndpointsResponse: AWSDecodableShape { + /// An array that contains key multi-region endpoint (global-endpoint) properties. + public let multiRegionEndpoints: [MultiRegionEndpoint]? + /// A token indicating that there are additional multi-region endpoints (global-endpoints) available to be listed. Pass this token to a subsequent ListMultiRegionEndpoints call to retrieve the next page. + public let nextToken: String? + + @inlinable + public init(multiRegionEndpoints: [MultiRegionEndpoint]? = nil, nextToken: String? = nil) { + self.multiRegionEndpoints = multiRegionEndpoints + self.nextToken = nextToken + } + + private enum CodingKeys: String, CodingKey { + case multiRegionEndpoints = "MultiRegionEndpoints" + case nextToken = "NextToken" + } + } + public struct ListRecommendationsRequest: AWSEncodableShape { /// Filters applied when retrieving recommendations. Can either be an individual filter, or combinations of STATUS and IMPACT or STATUS and TYPE public let filter: [ListRecommendationsFilterKey: String]? @@ -4265,6 +4508,40 @@ extension SESv2 { } } + public struct MultiRegionEndpoint: AWSDecodableShape { + /// The time stamp of when the multi-region endpoint (global-endpoint) was created.
+ public let createdTimestamp: Date? + /// The ID of the multi-region endpoint (global-endpoint). + public let endpointId: String? + /// The name of the multi-region endpoint (global-endpoint). + public let endpointName: String? + /// The time stamp of when the multi-region endpoint (global-endpoint) was last updated. + public let lastUpdatedTimestamp: Date? + /// Primary and secondary regions between which multi-region endpoint splits sending traffic. + public let regions: [String]? + /// The status of the multi-region endpoint (global-endpoint). CREATING – The resource is being provisioned. READY – The resource is ready to use. FAILED – The resource failed to be provisioned. DELETING – The resource is being deleted as requested. + public let status: Status? + + @inlinable + public init(createdTimestamp: Date? = nil, endpointId: String? = nil, endpointName: String? = nil, lastUpdatedTimestamp: Date? = nil, regions: [String]? = nil, status: Status? = nil) { + self.createdTimestamp = createdTimestamp + self.endpointId = endpointId + self.endpointName = endpointName + self.lastUpdatedTimestamp = lastUpdatedTimestamp + self.regions = regions + self.status = status + } + + private enum CodingKeys: String, CodingKey { + case createdTimestamp = "CreatedTimestamp" + case endpointId = "EndpointId" + case endpointName = "EndpointName" + case lastUpdatedTimestamp = "LastUpdatedTimestamp" + case regions = "Regions" + case status = "Status" + } + } + public struct OverallVolume: AWSDecodableShape { /// An object that contains inbox and junk mail placement metrics for individual email providers. public let domainIspPlacements: [DomainIspPlacement]? @@ -5079,6 +5356,34 @@ extension SESv2 { } } + public struct Route: AWSDecodableShape { + /// The name of an AWS-Region. 
+ public let region: String + + @inlinable + public init(region: String) { + self.region = region + } + + private enum CodingKeys: String, CodingKey { + case region = "Region" + } + } + + public struct RouteDetails: AWSEncodableShape { + /// The name of an AWS-Region to be a secondary region for the multi-region endpoint (global-endpoint). + public let region: String + + @inlinable + public init(region: String) { + self.region = region + } + + private enum CodingKeys: String, CodingKey { + case region = "Region" + } + } + public struct SOARecord: AWSDecodableShape { /// Administrative contact email from the SOA record. public let adminEmail: String? @@ -5110,6 +5415,8 @@ extension SESv2 { public let defaultContent: BulkEmailContent /// A list of tags, in the form of name/value pairs, to apply to an email that you send using the SendEmail operation. Tags correspond to characteristics of the email that you define, so that you can publish email sending events. public let defaultEmailTags: [MessageTag]? + /// The ID of the multi-region endpoint (global-endpoint). + public let endpointId: String? /// The address that you want bounce and complaint notifications to be sent to. public let feedbackForwardingEmailAddress: String? /// This parameter is used only for sending authorization. It is the ARN of the identity that is associated with the sending authorization policy that permits you to use the email address specified in the FeedbackForwardingEmailAddress parameter. For example, if the owner of example.com (which has ARN arn:aws:ses:us-east-1:123456789012:identity/example.com) attaches a policy to it that authorizes you to use feedback@example.com, then you would specify the FeedbackForwardingEmailAddressIdentityArn to be arn:aws:ses:us-east-1:123456789012:identity/example.com, and the FeedbackForwardingEmailAddress to be feedback@example.com. For more information about sending authorization, see the Amazon SES Developer Guide. 
@@ -5122,11 +5429,12 @@ extension SESv2 { public let replyToAddresses: [String]? @inlinable - public init(bulkEmailEntries: [BulkEmailEntry], configurationSetName: String? = nil, defaultContent: BulkEmailContent, defaultEmailTags: [MessageTag]? = nil, feedbackForwardingEmailAddress: String? = nil, feedbackForwardingEmailAddressIdentityArn: String? = nil, fromEmailAddress: String? = nil, fromEmailAddressIdentityArn: String? = nil, replyToAddresses: [String]? = nil) { + public init(bulkEmailEntries: [BulkEmailEntry], configurationSetName: String? = nil, defaultContent: BulkEmailContent, defaultEmailTags: [MessageTag]? = nil, endpointId: String? = nil, feedbackForwardingEmailAddress: String? = nil, feedbackForwardingEmailAddressIdentityArn: String? = nil, fromEmailAddress: String? = nil, fromEmailAddressIdentityArn: String? = nil, replyToAddresses: [String]? = nil) { self.bulkEmailEntries = bulkEmailEntries self.configurationSetName = configurationSetName self.defaultContent = defaultContent self.defaultEmailTags = defaultEmailTags + self.endpointId = endpointId self.feedbackForwardingEmailAddress = feedbackForwardingEmailAddress self.feedbackForwardingEmailAddressIdentityArn = feedbackForwardingEmailAddressIdentityArn self.fromEmailAddress = fromEmailAddress @@ -5146,6 +5454,7 @@ extension SESv2 { case configurationSetName = "ConfigurationSetName" case defaultContent = "DefaultContent" case defaultEmailTags = "DefaultEmailTags" + case endpointId = "EndpointId" case feedbackForwardingEmailAddress = "FeedbackForwardingEmailAddress" case feedbackForwardingEmailAddressIdentityArn = "FeedbackForwardingEmailAddressIdentityArn" case fromEmailAddress = "FromEmailAddress" @@ -5217,6 +5526,8 @@ extension SESv2 { public let destination: Destination? /// A list of tags, in the form of name/value pairs, to apply to an email that you send using the SendEmail operation. Tags correspond to characteristics of the email that you define, so that you can publish email sending events. 
public let emailTags: [MessageTag]? + /// The ID of the multi-region endpoint (global-endpoint). + public let endpointId: String? /// The address that you want bounce and complaint notifications to be sent to. public let feedbackForwardingEmailAddress: String? /// This parameter is used only for sending authorization. It is the ARN of the identity that is associated with the sending authorization policy that permits you to use the email address specified in the FeedbackForwardingEmailAddress parameter. For example, if the owner of example.com (which has ARN arn:aws:ses:us-east-1:123456789012:identity/example.com) attaches a policy to it that authorizes you to use feedback@example.com, then you would specify the FeedbackForwardingEmailAddressIdentityArn to be arn:aws:ses:us-east-1:123456789012:identity/example.com, and the FeedbackForwardingEmailAddress to be feedback@example.com. For more information about sending authorization, see the Amazon SES Developer Guide. @@ -5231,11 +5542,12 @@ extension SESv2 { public let replyToAddresses: [String]? @inlinable - public init(configurationSetName: String? = nil, content: EmailContent, destination: Destination? = nil, emailTags: [MessageTag]? = nil, feedbackForwardingEmailAddress: String? = nil, feedbackForwardingEmailAddressIdentityArn: String? = nil, fromEmailAddress: String? = nil, fromEmailAddressIdentityArn: String? = nil, listManagementOptions: ListManagementOptions? = nil, replyToAddresses: [String]? = nil) { + public init(configurationSetName: String? = nil, content: EmailContent, destination: Destination? = nil, emailTags: [MessageTag]? = nil, endpointId: String? = nil, feedbackForwardingEmailAddress: String? = nil, feedbackForwardingEmailAddressIdentityArn: String? = nil, fromEmailAddress: String? = nil, fromEmailAddressIdentityArn: String? = nil, listManagementOptions: ListManagementOptions? = nil, replyToAddresses: [String]? 
= nil) { self.configurationSetName = configurationSetName self.content = content self.destination = destination self.emailTags = emailTags + self.endpointId = endpointId self.feedbackForwardingEmailAddress = feedbackForwardingEmailAddress self.feedbackForwardingEmailAddressIdentityArn = feedbackForwardingEmailAddressIdentityArn self.fromEmailAddress = fromEmailAddress @@ -5253,6 +5565,7 @@ extension SESv2 { case content = "Content" case destination = "Destination" case emailTags = "EmailTags" + case endpointId = "EndpointId" case feedbackForwardingEmailAddress = "FeedbackForwardingEmailAddress" case feedbackForwardingEmailAddressIdentityArn = "FeedbackForwardingEmailAddressIdentityArn" case fromEmailAddress = "FromEmailAddress" @@ -5933,7 +6246,7 @@ extension SESv2 { } public struct VerificationInfo: AWSDecodableShape { - /// Provides the reason for the failure describing why Amazon SES was not able to successfully verify the identity. Below are the possible values: INVALID_VALUE – Amazon SES was able to find the record, but the value contained within the record was invalid. Ensure you have published the correct values for the record. TYPE_NOT_FOUND – The queried hostname exists but does not have the requested type of DNS record. Ensure that you have published the correct type of DNS record. HOST_NOT_FOUND – The queried hostname does not exist or was not reachable at the time of the request. Ensure that you have published the required DNS record(s). SERVICE_ERROR – A temporary issue is preventing Amazon SES from determining the verification status of the domain. DNS_SERVER_ERROR – The DNS server encountered an issue and was unable to complete the request. + /// Provides the reason for the failure describing why Amazon SES was not able to successfully verify the identity. Below are the possible values: INVALID_VALUE – Amazon SES was able to find the record, but the value contained within the record was invalid. 
Ensure you have published the correct values for the record. TYPE_NOT_FOUND – The queried hostname exists but does not have the requested type of DNS record. Ensure that you have published the correct type of DNS record. HOST_NOT_FOUND – The queried hostname does not exist or was not reachable at the time of the request. Ensure that you have published the required DNS record(s). SERVICE_ERROR – A temporary issue is preventing Amazon SES from determining the verification status of the domain. DNS_SERVER_ERROR – The DNS server encountered an issue and was unable to complete the request. REPLICATION_ACCESS_DENIED – The verification failed because the user does not have the required permissions to replicate the DKIM key from the primary region. Ensure you have the necessary permissions in both primary and replica regions. REPLICATION_PRIMARY_NOT_FOUND – The verification failed because no corresponding identity was found in the specified primary region. Ensure the identity exists in the primary region before attempting replication. REPLICATION_PRIMARY_BYO_DKIM_NOT_SUPPORTED – The verification failed because the identity in the primary region is configured with Bring Your Own DKIM (BYODKIM). DKIM key replication is only supported for identities using Easy DKIM. REPLICATION_REPLICA_AS_PRIMARY_NOT_SUPPORTED – The verification failed because the specified primary identity is a replica of another identity, and multi-level replication is not supported; the primary identity must be a non-replica identity. REPLICATION_PRIMARY_INVALID_REGION – The verification failed due to an invalid primary region specified. Ensure you provide a valid AWS region where Amazon SES is available and different from the replica region. public let errorType: VerificationError? /// The last time a verification attempt was made for this identity. public let lastCheckedTimestamp: Date? 
diff --git a/Sources/Soto/Services/ServiceDiscovery/ServiceDiscovery_api.swift b/Sources/Soto/Services/ServiceDiscovery/ServiceDiscovery_api.swift index 9c07997861..b7c8c55d26 100644 --- a/Sources/Soto/Services/ServiceDiscovery/ServiceDiscovery_api.swift +++ b/Sources/Soto/Services/ServiceDiscovery/ServiceDiscovery_api.swift @@ -345,7 +345,7 @@ public struct ServiceDiscovery: AWSService { return try await self.deleteNamespace(input, logger: logger) } - /// Deletes a specified service. If the service still contains one or more registered instances, the request fails. + /// Deletes a specified service and all associated service attributes. If the service still contains one or more registered instances, the request fails. @Sendable @inlinable public func deleteService(_ input: DeleteServiceRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DeleteServiceResponse { @@ -358,7 +358,7 @@ public struct ServiceDiscovery: AWSService { logger: logger ) } - /// Deletes a specified service. If the service still contains one or more registered instances, the request fails. + /// Deletes a specified service and all associated service attributes. If the service still contains one or more registered instances, the request fails. /// /// Parameters: /// - id: The ID of the service that you want to delete. @@ -374,6 +374,38 @@ public struct ServiceDiscovery: AWSService { return try await self.deleteService(input, logger: logger) } + /// Deletes specific attributes associated with a service. + @Sendable + @inlinable + public func deleteServiceAttributes(_ input: DeleteServiceAttributesRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DeleteServiceAttributesResponse { + try await self.client.execute( + operation: "DeleteServiceAttributes", + path: "/", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Deletes specific attributes associated with a service. 
+ /// + /// Parameters: + /// - attributes: A list of keys corresponding to each attribute that you want to delete. + /// - serviceId: The ID of the service from which the attributes will be deleted. + /// - logger: Logger use during operation + @inlinable + public func deleteServiceAttributes( + attributes: [String], + serviceId: String, + logger: Logger = AWSClient.loggingDisabled + ) async throws -> DeleteServiceAttributesResponse { + let input = DeleteServiceAttributesRequest( + attributes: attributes, + serviceId: serviceId + ) + return try await self.deleteServiceAttributes(input, logger: logger) + } + /// Deletes the Amazon Route 53 DNS records and health check, if any, that Cloud Map created for the specified instance. @Sendable @inlinable @@ -641,6 +673,35 @@ public struct ServiceDiscovery: AWSService { return try await self.getService(input, logger: logger) } + /// Returns the attributes associated with a specified service. + @Sendable + @inlinable + public func getServiceAttributes(_ input: GetServiceAttributesRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> GetServiceAttributesResponse { + try await self.client.execute( + operation: "GetServiceAttributes", + path: "/", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Returns the attributes associated with a specified service. + /// + /// Parameters: + /// - serviceId: The ID of the service that you want to get attributes for. + /// - logger: Logger use during operation + @inlinable + public func getServiceAttributes( + serviceId: String, + logger: Logger = AWSClient.loggingDisabled + ) async throws -> GetServiceAttributesResponse { + let input = GetServiceAttributesRequest( + serviceId: serviceId + ) + return try await self.getServiceAttributes(input, logger: logger) + } + /// Lists summary information about the instances that you registered by using a specified service. 
@Sendable @inlinable @@ -1069,7 +1130,7 @@ public struct ServiceDiscovery: AWSService { /// /// Parameters: /// - id: The ID of the service that you want to update. - /// - service: A complex type that contains the new settings for the service. + /// - service: A complex type that contains the new settings for the service. You can specify a maximum of 30 attributes (key-value pairs). /// - logger: Logger use during operation @inlinable public func updateService( @@ -1083,6 +1144,38 @@ public struct ServiceDiscovery: AWSService { ) return try await self.updateService(input, logger: logger) } + + /// Submits a request to update a specified service to add service-level attributes. + @Sendable + @inlinable + public func updateServiceAttributes(_ input: UpdateServiceAttributesRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> UpdateServiceAttributesResponse { + try await self.client.execute( + operation: "UpdateServiceAttributes", + path: "/", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Submits a request to update a specified service to add service-level attributes. + /// + /// Parameters: + /// - attributes: A string map that contains attribute key-value pairs. + /// - serviceId: The ID of the service that you want to update. 
+ /// - logger: Logger use during operation + @inlinable + public func updateServiceAttributes( + attributes: [String: String], + serviceId: String, + logger: Logger = AWSClient.loggingDisabled + ) async throws -> UpdateServiceAttributesResponse { + let input = UpdateServiceAttributesRequest( + attributes: attributes, + serviceId: serviceId + ) + return try await self.updateServiceAttributes(input, logger: logger) + } } extension ServiceDiscovery { diff --git a/Sources/Soto/Services/ServiceDiscovery/ServiceDiscovery_shapes.swift b/Sources/Soto/Services/ServiceDiscovery/ServiceDiscovery_shapes.swift index 34a51cb4c9..6622b06b51 100644 --- a/Sources/Soto/Services/ServiceDiscovery/ServiceDiscovery_shapes.swift +++ b/Sources/Soto/Services/ServiceDiscovery/ServiceDiscovery_shapes.swift @@ -417,6 +417,37 @@ extension ServiceDiscovery { } } + public struct DeleteServiceAttributesRequest: AWSEncodableShape { + /// A list of keys corresponding to each attribute that you want to delete. + public let attributes: [String] + /// The ID of the service from which the attributes will be deleted. + public let serviceId: String + + @inlinable + public init(attributes: [String], serviceId: String) { + self.attributes = attributes + self.serviceId = serviceId + } + + public func validate(name: String) throws { + try self.attributes.forEach { + try validate($0, name: "attributes[]", parent: name, max: 255) + } + try self.validate(self.attributes, name: "attributes", parent: name, max: 30) + try self.validate(self.attributes, name: "attributes", parent: name, min: 1) + try self.validate(self.serviceId, name: "serviceId", parent: name, max: 64) + } + + private enum CodingKeys: String, CodingKey { + case attributes = "Attributes" + case serviceId = "ServiceId" + } + } + + public struct DeleteServiceAttributesResponse: AWSDecodableShape { + public init() {} + } + public struct DeleteServiceRequest: AWSEncodableShape { /// The ID of the service that you want to delete. 
public let id: String @@ -504,6 +535,7 @@ extension ServiceDiscovery { try self.validate(self.maxResults, name: "maxResults", parent: name, max: 1000) try self.validate(self.maxResults, name: "maxResults", parent: name, min: 1) try self.validate(self.namespaceName, name: "namespaceName", parent: name, max: 1024) + try self.validate(self.namespaceName, name: "namespaceName", parent: name, pattern: "^[!-~]{1,1024}$") try self.optionalParameters?.forEach { try validate($0.key, name: "optionalParameters.key", parent: name, max: 255) try validate($0.key, name: "optionalParameters.key", parent: name, pattern: "^[a-zA-Z0-9!-~]+$") @@ -561,6 +593,7 @@ extension ServiceDiscovery { public func validate(name: String) throws { try self.validate(self.namespaceName, name: "namespaceName", parent: name, max: 1024) + try self.validate(self.namespaceName, name: "namespaceName", parent: name, pattern: "^[!-~]{1,1024}$") try self.validate(self.serviceName, name: "serviceName", parent: name, pattern: "^((?=^.{1,127}$)^([a-zA-Z0-9_][a-zA-Z0-9-_]{0,61}[a-zA-Z0-9_]|[a-zA-Z0-9])(\\.([a-zA-Z0-9_][a-zA-Z0-9-_]{0,61}[a-zA-Z0-9_]|[a-zA-Z0-9]))*$)|(^\\.$)$") } @@ -585,7 +618,7 @@ extension ServiceDiscovery { } public struct DnsConfig: AWSEncodableShape & AWSDecodableShape { - /// An array that contains one DnsRecord object for each Route 53 DNS record that you want Cloud Map to create when you register an instance. + /// An array that contains one DnsRecord object for each Route 53 DNS record that you want Cloud Map to create when you register an instance. The record type of a service specified in a DnsRecord object can't be updated. To change a record type, you need to delete the service and recreate it with a new DnsConfig. public let dnsRecords: [DnsRecord] /// Use NamespaceId in Service instead. The ID of the namespace to use for DNS configuration. public let namespaceId: String? 
@@ -838,6 +871,38 @@ extension ServiceDiscovery { } } + public struct GetServiceAttributesRequest: AWSEncodableShape { + /// The ID of the service that you want to get attributes for. + public let serviceId: String + + @inlinable + public init(serviceId: String) { + self.serviceId = serviceId + } + + public func validate(name: String) throws { + try self.validate(self.serviceId, name: "serviceId", parent: name, max: 64) + } + + private enum CodingKeys: String, CodingKey { + case serviceId = "ServiceId" + } + } + + public struct GetServiceAttributesResponse: AWSDecodableShape { + /// A complex type that contains the service ARN and a list of attribute key-value pairs associated with the service. + public let serviceAttributes: ServiceAttributes? + + @inlinable + public init(serviceAttributes: ServiceAttributes? = nil) { + self.serviceAttributes = serviceAttributes + } + + private enum CodingKeys: String, CodingKey { + case serviceAttributes = "ServiceAttributes" + } + } + public struct GetServiceRequest: AWSEncodableShape { /// The ID of the service that you want to get settings for. public let id: String @@ -1815,6 +1880,24 @@ extension ServiceDiscovery { } } + public struct ServiceAttributes: AWSDecodableShape { + /// A string map that contains the following information for the service that you specify in ServiceArn: The attributes that apply to the service. For each attribute, the applicable value. You can specify a total of 30 attributes. + public let attributes: [String: String]? + /// The ARN of the service that the attributes are associated with. + public let serviceArn: String? + + @inlinable + public init(attributes: [String: String]? = nil, serviceArn: String? = nil) { + self.attributes = attributes + self.serviceArn = serviceArn + } + + private enum CodingKeys: String, CodingKey { + case attributes = "Attributes" + case serviceArn = "ServiceArn" + } + } + public struct ServiceChange: AWSEncodableShape { /// A description for the service. 
public let description: String? @@ -2162,10 +2245,42 @@ extension ServiceDiscovery { } } + public struct UpdateServiceAttributesRequest: AWSEncodableShape { + /// A string map that contains attribute key-value pairs. + public let attributes: [String: String] + /// The ID of the service that you want to update. + public let serviceId: String + + @inlinable + public init(attributes: [String: String], serviceId: String) { + self.attributes = attributes + self.serviceId = serviceId + } + + public func validate(name: String) throws { + try self.attributes.forEach { + try validate($0.key, name: "attributes.key", parent: name, max: 255) + try validate($0.value, name: "attributes[\"\($0.key)\"]", parent: name, max: 1024) + } + try self.validate(self.attributes, name: "attributes", parent: name, max: 30) + try self.validate(self.attributes, name: "attributes", parent: name, min: 1) + try self.validate(self.serviceId, name: "serviceId", parent: name, max: 64) + } + + private enum CodingKeys: String, CodingKey { + case attributes = "Attributes" + case serviceId = "ServiceId" + } + } + + public struct UpdateServiceAttributesResponse: AWSDecodableShape { + public init() {} + } + public struct UpdateServiceRequest: AWSEncodableShape { /// The ID of the service that you want to update. public let id: String - /// A complex type that contains the new settings for the service. + /// A complex type that contains the new settings for the service. You can specify a maximum of 30 attributes (key-value pairs). 
public let service: ServiceChange @inlinable @@ -2217,6 +2332,7 @@ public struct ServiceDiscoveryErrorType: AWSErrorType { case resourceLimitExceeded = "ResourceLimitExceeded" case resourceNotFoundException = "ResourceNotFoundException" case serviceAlreadyExists = "ServiceAlreadyExists" + case serviceAttributesLimitExceededException = "ServiceAttributesLimitExceededException" case serviceNotFound = "ServiceNotFound" case tooManyTagsException = "TooManyTagsException" } @@ -2263,6 +2379,8 @@ public struct ServiceDiscoveryErrorType: AWSErrorType { public static var resourceNotFoundException: Self { .init(.resourceNotFoundException) } /// The service can't be created because a service with the same name already exists. public static var serviceAlreadyExists: Self { .init(.serviceAlreadyExists) } + /// The attribute can't be added to the service because you've exceeded the quota for the number of attributes you can add to a service. + public static var serviceAttributesLimitExceededException: Self { .init(.serviceAttributesLimitExceededException) } /// No service exists with the specified ID. public static var serviceNotFound: Self { .init(.serviceNotFound) } /// The list of tags on the resource is over the quota. The maximum number of tags that can be applied to a resource is 50. 
diff --git a/Sources/Soto/Services/Synthetics/Synthetics_shapes.swift b/Sources/Soto/Services/Synthetics/Synthetics_shapes.swift index 89a13c3759..f3b4ebfaad 100644 --- a/Sources/Soto/Services/Synthetics/Synthetics_shapes.swift +++ b/Sources/Soto/Services/Synthetics/Synthetics_shapes.swift @@ -143,7 +143,7 @@ extension Synthetics { try self.validate(self.groupIdentifier, name: "groupIdentifier", parent: name, min: 1) try self.validate(self.resourceArn, name: "resourceArn", parent: name, max: 2048) try self.validate(self.resourceArn, name: "resourceArn", parent: name, min: 1) - try self.validate(self.resourceArn, name: "resourceArn", parent: name, pattern: "^arn:(aws[a-zA-Z-]*)?:synthetics:[a-z]{2}((-gov)|(-iso(b?)))?-[a-z]+-\\d{1}:\\d{12}:canary:[0-9a-z_\\-]{1,255}$") + try self.validate(self.resourceArn, name: "resourceArn", parent: name, pattern: "^arn:(aws[a-zA-Z-]*)?:synthetics:[a-z]{2,4}(-[a-z]{2,4})?-[a-z]+-\\d{1}:\\d{12}:canary:[0-9a-z_\\-]{1,255}$") } private enum CodingKeys: String, CodingKey { @@ -952,7 +952,7 @@ extension Synthetics { try self.validate(self.groupIdentifier, name: "groupIdentifier", parent: name, min: 1) try self.validate(self.resourceArn, name: "resourceArn", parent: name, max: 2048) try self.validate(self.resourceArn, name: "resourceArn", parent: name, min: 1) - try self.validate(self.resourceArn, name: "resourceArn", parent: name, pattern: "^arn:(aws[a-zA-Z-]*)?:synthetics:[a-z]{2}((-gov)|(-iso(b?)))?-[a-z]+-\\d{1}:\\d{12}:canary:[0-9a-z_\\-]{1,255}$") + try self.validate(self.resourceArn, name: "resourceArn", parent: name, pattern: "^arn:(aws[a-zA-Z-]*)?:synthetics:[a-z]{2,4}(-[a-z]{2,4})?-[a-z]+-\\d{1}:\\d{12}:canary:[0-9a-z_\\-]{1,255}$") } private enum CodingKeys: String, CodingKey { @@ -1183,7 +1183,7 @@ extension Synthetics { try self.validate(self.nextToken, name: "nextToken", parent: name, pattern: "^.+$") try self.validate(self.resourceArn, name: "resourceArn", parent: name, max: 2048) try self.validate(self.resourceArn, name: 
"resourceArn", parent: name, min: 1) - try self.validate(self.resourceArn, name: "resourceArn", parent: name, pattern: "^arn:(aws[a-zA-Z-]*)?:synthetics:[a-z]{2}((-gov)|(-iso(b?)))?-[a-z]+-\\d{1}:\\d{12}:canary:[0-9a-z_\\-]{1,255}$") + try self.validate(self.resourceArn, name: "resourceArn", parent: name, pattern: "^arn:(aws[a-zA-Z-]*)?:synthetics:[a-z]{2,4}(-[a-z]{2,4})?-[a-z]+-\\d{1}:\\d{12}:canary:[0-9a-z_\\-]{1,255}$") } private enum CodingKeys: String, CodingKey { @@ -1329,7 +1329,7 @@ extension Synthetics { public func validate(name: String) throws { try self.validate(self.resourceArn, name: "resourceArn", parent: name, max: 2048) try self.validate(self.resourceArn, name: "resourceArn", parent: name, min: 1) - try self.validate(self.resourceArn, name: "resourceArn", parent: name, pattern: "^arn:(aws[a-zA-Z-]*)?:synthetics:[a-z]{2}((-gov)|(-iso(b?)))?-[a-z]+-\\d{1}:\\d{12}:(canary|group):[0-9a-z_\\-]+$") + try self.validate(self.resourceArn, name: "resourceArn", parent: name, pattern: "^arn:(aws[a-zA-Z-]*)?:synthetics:[a-z]{2,4}(-[a-z]{2,4})?-[a-z]+-\\d{1}:\\d{12}:(canary|group):[0-9a-z_\\-]+$") } private enum CodingKeys: CodingKey {} @@ -1390,7 +1390,7 @@ extension Synthetics { public func validate(name: String) throws { try self.validate(self.kmsKeyArn, name: "kmsKeyArn", parent: name, max: 2048) try self.validate(self.kmsKeyArn, name: "kmsKeyArn", parent: name, min: 1) - try self.validate(self.kmsKeyArn, name: "kmsKeyArn", parent: name, pattern: "^arn:(aws[a-zA-Z-]*)?:kms:[a-z]{2}((-gov)|(-iso(b?)))?-[a-z]+-\\d{1}:\\d{12}:key/[\\w\\-\\/]+$") + try self.validate(self.kmsKeyArn, name: "kmsKeyArn", parent: name, pattern: "^arn:(aws[a-zA-Z-]*)?:kms:[a-z]{2,4}(-[a-z]{2,4})?-[a-z]+-\\d{1}:\\d{12}:key/[\\w\\-\\/]+$") } private enum CodingKeys: String, CodingKey { @@ -1478,7 +1478,7 @@ extension Synthetics { public func validate(name: String) throws { try self.validate(self.resourceArn, name: "resourceArn", parent: name, max: 2048) try 
self.validate(self.resourceArn, name: "resourceArn", parent: name, min: 1) - try self.validate(self.resourceArn, name: "resourceArn", parent: name, pattern: "^arn:(aws[a-zA-Z-]*)?:synthetics:[a-z]{2}((-gov)|(-iso(b?)))?-[a-z]+-\\d{1}:\\d{12}:(canary|group):[0-9a-z_\\-]+$") + try self.validate(self.resourceArn, name: "resourceArn", parent: name, pattern: "^arn:(aws[a-zA-Z-]*)?:synthetics:[a-z]{2,4}(-[a-z]{2,4})?-[a-z]+-\\d{1}:\\d{12}:(canary|group):[0-9a-z_\\-]+$") try self.tags.forEach { try validate($0.key, name: "tags.key", parent: name, max: 128) try validate($0.key, name: "tags.key", parent: name, min: 1) @@ -1520,7 +1520,7 @@ extension Synthetics { public func validate(name: String) throws { try self.validate(self.resourceArn, name: "resourceArn", parent: name, max: 2048) try self.validate(self.resourceArn, name: "resourceArn", parent: name, min: 1) - try self.validate(self.resourceArn, name: "resourceArn", parent: name, pattern: "^arn:(aws[a-zA-Z-]*)?:synthetics:[a-z]{2}((-gov)|(-iso(b?)))?-[a-z]+-\\d{1}:\\d{12}:(canary|group):[0-9a-z_\\-]+$") + try self.validate(self.resourceArn, name: "resourceArn", parent: name, pattern: "^arn:(aws[a-zA-Z-]*)?:synthetics:[a-z]{2,4}(-[a-z]{2,4})?-[a-z]+-\\d{1}:\\d{12}:(canary|group):[0-9a-z_\\-]+$") try self.tagKeys.forEach { try validate($0, name: "tagKeys[]", parent: name, max: 128) try validate($0, name: "tagKeys[]", parent: name, min: 1) @@ -1644,7 +1644,7 @@ extension Synthetics { } public struct VisualReferenceInput: AWSEncodableShape { - /// Specifies which canary run to use the screenshots from as the baseline for future visual monitoring with this canary. Valid values are nextrun to use the screenshots from the next run after this update is made, lastrun to use the screenshots from the most recent run before this update was made, or the value of Id in the CanaryRun from any past run of this canary. 
+ /// Specifies which canary run to use the screenshots from as the baseline for future visual monitoring with this canary. Valid values are nextrun to use the screenshots from the next run after this update is made, lastrun to use the screenshots from the most recent run before this update was made, or the value of Id in the CanaryRun from a run of this canary in the past 31 days. If you specify the Id of a canary run older than 31 days, the operation returns a 400 validation exception error. public let baseCanaryRunId: String /// An array of screenshots that will be used as the baseline for visual monitoring in future runs of this canary. If there is a screenshot that you don't want to be used for visual monitoring, remove it from this array. public let baseScreenshots: [BaseScreenshot]? @@ -1688,13 +1688,16 @@ extension Synthetics { } public struct VpcConfigInput: AWSEncodableShape { + /// Set this to true to allow outbound IPv6 traffic on VPC canaries that are connected to dual-stack subnets. The default is false. + public let ipv6AllowedForDualStack: Bool? /// The IDs of the security groups for this canary. public let securityGroupIds: [String]? /// The IDs of the subnets where this canary is to run. public let subnetIds: [String]? @inlinable - public init(securityGroupIds: [String]? = nil, subnetIds: [String]? = nil) { + public init(ipv6AllowedForDualStack: Bool? = nil, securityGroupIds: [String]? = nil, subnetIds: [String]? = nil) { + self.ipv6AllowedForDualStack = ipv6AllowedForDualStack self.securityGroupIds = securityGroupIds self.subnetIds = subnetIds } @@ -1705,12 +1708,15 @@ extension Synthetics { } private enum CodingKeys: String, CodingKey { + case ipv6AllowedForDualStack = "Ipv6AllowedForDualStack" case securityGroupIds = "SecurityGroupIds" case subnetIds = "SubnetIds" } } public struct VpcConfigOutput: AWSDecodableShape { + /// Indicates whether this canary allows outbound IPv6 traffic if it is connected to dual-stack subnets.
+ public let ipv6AllowedForDualStack: Bool? /// The IDs of the security groups for this canary. public let securityGroupIds: [String]? /// The IDs of the subnets where this canary is to run. @@ -1719,13 +1725,15 @@ extension Synthetics { public let vpcId: String? @inlinable - public init(securityGroupIds: [String]? = nil, subnetIds: [String]? = nil, vpcId: String? = nil) { + public init(ipv6AllowedForDualStack: Bool? = nil, securityGroupIds: [String]? = nil, subnetIds: [String]? = nil, vpcId: String? = nil) { + self.ipv6AllowedForDualStack = ipv6AllowedForDualStack self.securityGroupIds = securityGroupIds self.subnetIds = subnetIds self.vpcId = vpcId } private enum CodingKeys: String, CodingKey { + case ipv6AllowedForDualStack = "Ipv6AllowedForDualStack" case securityGroupIds = "SecurityGroupIds" case subnetIds = "SubnetIds" case vpcId = "VpcId" diff --git a/Sources/Soto/Services/TimestreamInfluxDB/TimestreamInfluxDB_api.swift b/Sources/Soto/Services/TimestreamInfluxDB/TimestreamInfluxDB_api.swift index 2475d3aa5d..0b6e9ab0e4 100644 --- a/Sources/Soto/Services/TimestreamInfluxDB/TimestreamInfluxDB_api.swift +++ b/Sources/Soto/Services/TimestreamInfluxDB/TimestreamInfluxDB_api.swift @@ -25,7 +25,7 @@ import Foundation /// Service object for interacting with AWS TimestreamInfluxDB service. /// -/// Amazon Timestream for InfluxDB is a managed time-series database engine that makes it easy for application developers and DevOps teams to run InfluxDB databases on AWS for near real-time time-series applications using open-source APIs. With Amazon Timestream for InfluxDB, it is easy to set up, operate, and scale time-series workloads that can answer queries with single-digit millisecond query response time. +/// Amazon Timestream for InfluxDB is a managed time-series database engine that makes it easy for application developers and DevOps teams to run InfluxDB databases on Amazon Web Services for near real-time time-series applications using open-source APIs. 
With Amazon Timestream for InfluxDB, it is easy to set up, operate, and scale time-series workloads that can answer queries with single-digit millisecond query response time. public struct TimestreamInfluxDB: AWSService { // MARK: Member variables @@ -104,8 +104,9 @@ public struct TimestreamInfluxDB: AWSService { /// - deploymentType: Specifies whether the DB instance will be deployed as a standalone instance or with a Multi-AZ standby for high availability. /// - logDeliveryConfiguration: Configuration for sending InfluxDB engine logs to a specified S3 bucket. /// - name: The name that uniquely identifies the DB instance when interacting with the Amazon Timestream for InfluxDB API and CLI commands. This name will also be a prefix included in the endpoint. DB instance names must be unique per customer and per region. + /// - networkType: Specifies whether the networkType of the Timestream for InfluxDB instance is IPV4, which can communicate over IPv4 protocol only, or DUAL, which can communicate over both IPv4 and IPv6 protocols. /// - organization: The name of the initial organization for the initial admin user in InfluxDB. An InfluxDB organization is a workspace for a group of users. - /// - password: The password of the initial admin user created in InfluxDB. This password will allow you to access the InfluxDB UI to perform various administrative tasks and also use the InfluxDB CLI to create an operator token. These attributes will be stored in a Secret created in AWS SecretManager in your account. + /// - password: The password of the initial admin user created in InfluxDB. This password will allow you to access the InfluxDB UI to perform various administrative tasks and also use the InfluxDB CLI to create an operator token. These attributes will be stored in a Secret created in Amazon Web Services SecretManager in your account. /// - port: The port number on which InfluxDB accepts connections. 
Valid Values: 1024-65535 Default: 8086 Constraints: The value can't be 2375-2376, 7788-7799, 8090, or 51678-51680 /// - publiclyAccessible: Configures the DB instance with a public IP to facilitate access. /// - tags: A list of key-value pairs to associate with the DB instance. @@ -123,6 +124,7 @@ public struct TimestreamInfluxDB: AWSService { deploymentType: DeploymentType? = nil, logDeliveryConfiguration: LogDeliveryConfiguration? = nil, name: String, + networkType: NetworkType? = nil, organization: String? = nil, password: String, port: Int? = nil, @@ -142,6 +144,7 @@ public struct TimestreamInfluxDB: AWSService { deploymentType: deploymentType, logDeliveryConfiguration: logDeliveryConfiguration, name: name, + networkType: networkType, organization: organization, password: password, port: port, diff --git a/Sources/Soto/Services/TimestreamInfluxDB/TimestreamInfluxDB_shapes.swift b/Sources/Soto/Services/TimestreamInfluxDB/TimestreamInfluxDB_shapes.swift index 3f98e1b880..7c342c3b67 100644 --- a/Sources/Soto/Services/TimestreamInfluxDB/TimestreamInfluxDB_shapes.swift +++ b/Sources/Soto/Services/TimestreamInfluxDB/TimestreamInfluxDB_shapes.swift @@ -66,6 +66,12 @@ extension TimestreamInfluxDB { public var description: String { return self.rawValue } } + public enum NetworkType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case dual = "DUAL" + case ipv4 = "IPV4" + public var description: String { return self.rawValue } + } + public enum Status: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case available = "AVAILABLE" case creating = "CREATING" @@ -104,9 +110,11 @@ extension TimestreamInfluxDB { public let logDeliveryConfiguration: LogDeliveryConfiguration? /// The name that uniquely identifies the DB instance when interacting with the Amazon Timestream for InfluxDB API and CLI commands. This name will also be a prefix included in the endpoint. 
DB instance names must be unique per customer and per region. public let name: String + /// Specifies whether the networkType of the Timestream for InfluxDB instance is IPV4, which can communicate over IPv4 protocol only, or DUAL, which can communicate over both IPv4 and IPv6 protocols. + public let networkType: NetworkType? /// The name of the initial organization for the initial admin user in InfluxDB. An InfluxDB organization is a workspace for a group of users. public let organization: String? - /// The password of the initial admin user created in InfluxDB. This password will allow you to access the InfluxDB UI to perform various administrative tasks and also use the InfluxDB CLI to create an operator token. These attributes will be stored in a Secret created in AWS SecretManager in your account. + /// The password of the initial admin user created in InfluxDB. This password will allow you to access the InfluxDB UI to perform various administrative tasks and also use the InfluxDB CLI to create an operator token. These attributes will be stored in a Secret created in Amazon Web Services SecretManager in your account. public let password: String /// The port number on which InfluxDB accepts connections. Valid Values: 1024-65535 Default: 8086 Constraints: The value can't be 2375-2376, 7788-7799, 8090, or 51678-51680 public let port: Int? @@ -122,7 +130,7 @@ extension TimestreamInfluxDB { public let vpcSubnetIds: [String] @inlinable - public init(allocatedStorage: Int, bucket: String? = nil, dbInstanceType: DbInstanceType, dbParameterGroupIdentifier: String? = nil, dbStorageType: DbStorageType? = nil, deploymentType: DeploymentType? = nil, logDeliveryConfiguration: LogDeliveryConfiguration? = nil, name: String, organization: String? = nil, password: String, port: Int? = nil, publiclyAccessible: Bool? = nil, tags: [String: String]? = nil, username: String? 
= nil, vpcSecurityGroupIds: [String], vpcSubnetIds: [String]) { + public init(allocatedStorage: Int, bucket: String? = nil, dbInstanceType: DbInstanceType, dbParameterGroupIdentifier: String? = nil, dbStorageType: DbStorageType? = nil, deploymentType: DeploymentType? = nil, logDeliveryConfiguration: LogDeliveryConfiguration? = nil, name: String, networkType: NetworkType? = nil, organization: String? = nil, password: String, port: Int? = nil, publiclyAccessible: Bool? = nil, tags: [String: String]? = nil, username: String? = nil, vpcSecurityGroupIds: [String], vpcSubnetIds: [String]) { self.allocatedStorage = allocatedStorage self.bucket = bucket self.dbInstanceType = dbInstanceType @@ -131,6 +139,7 @@ extension TimestreamInfluxDB { self.deploymentType = deploymentType self.logDeliveryConfiguration = logDeliveryConfiguration self.name = name + self.networkType = networkType self.organization = organization self.password = password self.port = port @@ -192,6 +201,7 @@ extension TimestreamInfluxDB { case deploymentType = "deploymentType" case logDeliveryConfiguration = "logDeliveryConfiguration" case name = "name" + case networkType = "networkType" case organization = "organization" case password = "password" case port = "port" @@ -222,12 +232,14 @@ extension TimestreamInfluxDB { public let endpoint: String? /// A service-generated unique identifier. public let id: String - /// The Amazon Resource Name (ARN) of the AWS Secrets Manager secret containing the initial InfluxDB authorization parameters. The secret value is a JSON formatted key-value pair holding InfluxDB authorization values: organization, bucket, username, and password. + /// The Amazon Resource Name (ARN) of the Amazon Web Services Secrets Manager secret containing the initial InfluxDB authorization parameters. The secret value is a JSON formatted key-value pair holding InfluxDB authorization values: organization, bucket, username, and password. public let influxAuthParametersSecretArn: String? 
/// Configuration for sending InfluxDB engine logs to send to specified S3 bucket. public let logDeliveryConfiguration: LogDeliveryConfiguration? /// The customer-supplied name that uniquely identifies the DB instance when interacting with the Amazon Timestream for InfluxDB API and CLI commands. public let name: String + /// Specifies whether the networkType of the Timestream for InfluxDB instance is IPV4, which can communicate over IPv4 protocol only, or DUAL, which can communicate over both IPv4 and IPv6 protocols. + public let networkType: NetworkType? /// The port number on which InfluxDB accepts connections. The default value is 8086. public let port: Int? /// Indicates if the DB instance has a public IP to facilitate access. @@ -242,7 +254,7 @@ extension TimestreamInfluxDB { public let vpcSubnetIds: [String] @inlinable - public init(allocatedStorage: Int? = nil, arn: String, availabilityZone: String? = nil, dbInstanceType: DbInstanceType? = nil, dbParameterGroupIdentifier: String? = nil, dbStorageType: DbStorageType? = nil, deploymentType: DeploymentType? = nil, endpoint: String? = nil, id: String, influxAuthParametersSecretArn: String? = nil, logDeliveryConfiguration: LogDeliveryConfiguration? = nil, name: String, port: Int? = nil, publiclyAccessible: Bool? = nil, secondaryAvailabilityZone: String? = nil, status: Status? = nil, vpcSecurityGroupIds: [String]? = nil, vpcSubnetIds: [String]) { + public init(allocatedStorage: Int? = nil, arn: String, availabilityZone: String? = nil, dbInstanceType: DbInstanceType? = nil, dbParameterGroupIdentifier: String? = nil, dbStorageType: DbStorageType? = nil, deploymentType: DeploymentType? = nil, endpoint: String? = nil, id: String, influxAuthParametersSecretArn: String? = nil, logDeliveryConfiguration: LogDeliveryConfiguration? = nil, name: String, networkType: NetworkType? = nil, port: Int? = nil, publiclyAccessible: Bool? = nil, secondaryAvailabilityZone: String? = nil, status: Status? 
= nil, vpcSecurityGroupIds: [String]? = nil, vpcSubnetIds: [String]) { self.allocatedStorage = allocatedStorage self.arn = arn self.availabilityZone = availabilityZone @@ -255,6 +267,7 @@ extension TimestreamInfluxDB { self.influxAuthParametersSecretArn = influxAuthParametersSecretArn self.logDeliveryConfiguration = logDeliveryConfiguration self.name = name + self.networkType = networkType self.port = port self.publiclyAccessible = publiclyAccessible self.secondaryAvailabilityZone = secondaryAvailabilityZone @@ -276,6 +289,7 @@ extension TimestreamInfluxDB { case influxAuthParametersSecretArn = "influxAuthParametersSecretArn" case logDeliveryConfiguration = "logDeliveryConfiguration" case name = "name" + case networkType = "networkType" case port = "port" case publiclyAccessible = "publiclyAccessible" case secondaryAvailabilityZone = "secondaryAvailabilityZone" @@ -369,15 +383,17 @@ extension TimestreamInfluxDB { public let endpoint: String? /// The service-generated unique identifier of the DB instance. public let id: String - /// This customer-supplied name uniquely identifies the DB instance when interacting with the Amazon Timestream for InfluxDB API and AWS CLI commands. + /// This customer-supplied name uniquely identifies the DB instance when interacting with the Amazon Timestream for InfluxDB API and Amazon Web Services CLI commands. public let name: String + /// Specifies whether the networkType of the Timestream for InfluxDB instance is IPV4, which can communicate over IPv4 protocol only, or DUAL, which can communicate over both IPv4 and IPv6 protocols. + public let networkType: NetworkType? /// The port number on which InfluxDB accepts connections. public let port: Int? /// The status of the DB instance. public let status: Status? @inlinable - public init(allocatedStorage: Int? = nil, arn: String, dbInstanceType: DbInstanceType? = nil, dbStorageType: DbStorageType? = nil, deploymentType: DeploymentType? = nil, endpoint: String? 
= nil, id: String, name: String, port: Int? = nil, status: Status? = nil) { + public init(allocatedStorage: Int? = nil, arn: String, dbInstanceType: DbInstanceType? = nil, dbStorageType: DbStorageType? = nil, deploymentType: DeploymentType? = nil, endpoint: String? = nil, id: String, name: String, networkType: NetworkType? = nil, port: Int? = nil, status: Status? = nil) { self.allocatedStorage = allocatedStorage self.arn = arn self.dbInstanceType = dbInstanceType @@ -386,6 +402,7 @@ extension TimestreamInfluxDB { self.endpoint = endpoint self.id = id self.name = name + self.networkType = networkType self.port = port self.status = status } @@ -399,6 +416,7 @@ extension TimestreamInfluxDB { case endpoint = "endpoint" case id = "id" case name = "name" + case networkType = "networkType" case port = "port" case status = "status" } @@ -469,12 +487,14 @@ extension TimestreamInfluxDB { public let endpoint: String? /// A service-generated unique identifier. public let id: String - /// The Amazon Resource Name (ARN) of the AWS Secrets Manager secret containing the initial InfluxDB authorization parameters. The secret value is a JSON formatted key-value pair holding InfluxDB authorization values: organization, bucket, username, and password. + /// The Amazon Resource Name (ARN) of the Amazon Web Services Secrets Manager secret containing the initial InfluxDB authorization parameters. The secret value is a JSON formatted key-value pair holding InfluxDB authorization values: organization, bucket, username, and password. public let influxAuthParametersSecretArn: String? /// Configuration for sending InfluxDB engine logs to send to specified S3 bucket. public let logDeliveryConfiguration: LogDeliveryConfiguration? /// The customer-supplied name that uniquely identifies the DB instance when interacting with the Amazon Timestream for InfluxDB API and CLI commands. 
public let name: String + /// Specifies whether the networkType of the Timestream for InfluxDB instance is IPV4, which can communicate over IPv4 protocol only, or DUAL, which can communicate over both IPv4 and IPv6 protocols. + public let networkType: NetworkType? /// The port number on which InfluxDB accepts connections. public let port: Int? /// Indicates if the DB instance has a public IP to facilitate access. @@ -489,7 +509,7 @@ extension TimestreamInfluxDB { public let vpcSubnetIds: [String] @inlinable - public init(allocatedStorage: Int? = nil, arn: String, availabilityZone: String? = nil, dbInstanceType: DbInstanceType? = nil, dbParameterGroupIdentifier: String? = nil, dbStorageType: DbStorageType? = nil, deploymentType: DeploymentType? = nil, endpoint: String? = nil, id: String, influxAuthParametersSecretArn: String? = nil, logDeliveryConfiguration: LogDeliveryConfiguration? = nil, name: String, port: Int? = nil, publiclyAccessible: Bool? = nil, secondaryAvailabilityZone: String? = nil, status: Status? = nil, vpcSecurityGroupIds: [String]? = nil, vpcSubnetIds: [String]) { + public init(allocatedStorage: Int? = nil, arn: String, availabilityZone: String? = nil, dbInstanceType: DbInstanceType? = nil, dbParameterGroupIdentifier: String? = nil, dbStorageType: DbStorageType? = nil, deploymentType: DeploymentType? = nil, endpoint: String? = nil, id: String, influxAuthParametersSecretArn: String? = nil, logDeliveryConfiguration: LogDeliveryConfiguration? = nil, name: String, networkType: NetworkType? = nil, port: Int? = nil, publiclyAccessible: Bool? = nil, secondaryAvailabilityZone: String? = nil, status: Status? = nil, vpcSecurityGroupIds: [String]? 
= nil, vpcSubnetIds: [String]) { self.allocatedStorage = allocatedStorage self.arn = arn self.availabilityZone = availabilityZone @@ -502,6 +522,7 @@ extension TimestreamInfluxDB { self.influxAuthParametersSecretArn = influxAuthParametersSecretArn self.logDeliveryConfiguration = logDeliveryConfiguration self.name = name + self.networkType = networkType self.port = port self.publiclyAccessible = publiclyAccessible self.secondaryAvailabilityZone = secondaryAvailabilityZone @@ -523,6 +544,7 @@ extension TimestreamInfluxDB { case influxAuthParametersSecretArn = "influxAuthParametersSecretArn" case logDeliveryConfiguration = "logDeliveryConfiguration" case name = "name" + case networkType = "networkType" case port = "port" case publiclyAccessible = "publiclyAccessible" case secondaryAvailabilityZone = "secondaryAvailabilityZone" @@ -589,12 +611,14 @@ extension TimestreamInfluxDB { public let endpoint: String? /// A service-generated unique identifier. public let id: String - /// The Amazon Resource Name (ARN) of the AWS Secrets Manager secret containing the initial InfluxDB authorization parameters. The secret value is a JSON formatted key-value pair holding InfluxDB authorization values: organization, bucket, username, and password. + /// The Amazon Resource Name (ARN) of the Amazon Web Services Secrets Manager secret containing the initial InfluxDB authorization parameters. The secret value is a JSON formatted key-value pair holding InfluxDB authorization values: organization, bucket, username, and password. public let influxAuthParametersSecretArn: String? /// Configuration for sending InfluxDB engine logs to send to specified S3 bucket. public let logDeliveryConfiguration: LogDeliveryConfiguration? /// The customer-supplied name that uniquely identifies the DB instance when interacting with the Amazon Timestream for InfluxDB API and CLI commands. 
public let name: String + /// Specifies whether the networkType of the Timestream for InfluxDB instance is IPV4, which can communicate over IPv4 protocol only, or DUAL, which can communicate over both IPv4 and IPv6 protocols. + public let networkType: NetworkType? /// The port number on which InfluxDB accepts connections. public let port: Int? /// Indicates if the DB instance has a public IP to facilitate access. @@ -609,7 +633,7 @@ extension TimestreamInfluxDB { public let vpcSubnetIds: [String] @inlinable - public init(allocatedStorage: Int? = nil, arn: String, availabilityZone: String? = nil, dbInstanceType: DbInstanceType? = nil, dbParameterGroupIdentifier: String? = nil, dbStorageType: DbStorageType? = nil, deploymentType: DeploymentType? = nil, endpoint: String? = nil, id: String, influxAuthParametersSecretArn: String? = nil, logDeliveryConfiguration: LogDeliveryConfiguration? = nil, name: String, port: Int? = nil, publiclyAccessible: Bool? = nil, secondaryAvailabilityZone: String? = nil, status: Status? = nil, vpcSecurityGroupIds: [String]? = nil, vpcSubnetIds: [String]) { + public init(allocatedStorage: Int? = nil, arn: String, availabilityZone: String? = nil, dbInstanceType: DbInstanceType? = nil, dbParameterGroupIdentifier: String? = nil, dbStorageType: DbStorageType? = nil, deploymentType: DeploymentType? = nil, endpoint: String? = nil, id: String, influxAuthParametersSecretArn: String? = nil, logDeliveryConfiguration: LogDeliveryConfiguration? = nil, name: String, networkType: NetworkType? = nil, port: Int? = nil, publiclyAccessible: Bool? = nil, secondaryAvailabilityZone: String? = nil, status: Status? = nil, vpcSecurityGroupIds: [String]? 
= nil, vpcSubnetIds: [String]) { self.allocatedStorage = allocatedStorage self.arn = arn self.availabilityZone = availabilityZone @@ -622,6 +646,7 @@ extension TimestreamInfluxDB { self.influxAuthParametersSecretArn = influxAuthParametersSecretArn self.logDeliveryConfiguration = logDeliveryConfiguration self.name = name + self.networkType = networkType self.port = port self.publiclyAccessible = publiclyAccessible self.secondaryAvailabilityZone = secondaryAvailabilityZone @@ -643,6 +668,7 @@ extension TimestreamInfluxDB { case influxAuthParametersSecretArn = "influxAuthParametersSecretArn" case logDeliveryConfiguration = "logDeliveryConfiguration" case name = "name" + case networkType = "networkType" case port = "port" case publiclyAccessible = "publiclyAccessible" case secondaryAvailabilityZone = "secondaryAvailabilityZone" @@ -1129,12 +1155,14 @@ extension TimestreamInfluxDB { public let endpoint: String? /// A service-generated unique identifier. public let id: String - /// The Amazon Resource Name (ARN) of the AWS Secrets Manager secret containing the initial InfluxDB authorization parameters. The secret value is a JSON formatted key-value pair holding InfluxDB authorization values: organization, bucket, username, and password. + /// The Amazon Resource Name (ARN) of the Amazon Web Services Secrets Manager secret containing the initial InfluxDB authorization parameters. The secret value is a JSON formatted key-value pair holding InfluxDB authorization values: organization, bucket, username, and password. public let influxAuthParametersSecretArn: String? /// Configuration for sending InfluxDB engine logs to send to specified S3 bucket. public let logDeliveryConfiguration: LogDeliveryConfiguration? - /// This customer-supplied name uniquely identifies the DB instance when interacting with the Amazon Timestream for InfluxDB API and AWS CLI commands. 
+ /// This customer-supplied name uniquely identifies the DB instance when interacting with the Amazon Timestream for InfluxDB API and Amazon Web Services CLI commands. public let name: String + /// Specifies whether the networkType of the Timestream for InfluxDB instance is IPV4, which can communicate over IPv4 protocol only, or DUAL, which can communicate over both IPv4 and IPv6 protocols. + public let networkType: NetworkType? /// The port number on which InfluxDB accepts connections. public let port: Int? /// Indicates if the DB instance has a public IP to facilitate access. @@ -1149,7 +1177,7 @@ extension TimestreamInfluxDB { public let vpcSubnetIds: [String] @inlinable - public init(allocatedStorage: Int? = nil, arn: String, availabilityZone: String? = nil, dbInstanceType: DbInstanceType? = nil, dbParameterGroupIdentifier: String? = nil, dbStorageType: DbStorageType? = nil, deploymentType: DeploymentType? = nil, endpoint: String? = nil, id: String, influxAuthParametersSecretArn: String? = nil, logDeliveryConfiguration: LogDeliveryConfiguration? = nil, name: String, port: Int? = nil, publiclyAccessible: Bool? = nil, secondaryAvailabilityZone: String? = nil, status: Status? = nil, vpcSecurityGroupIds: [String]? = nil, vpcSubnetIds: [String]) { + public init(allocatedStorage: Int? = nil, arn: String, availabilityZone: String? = nil, dbInstanceType: DbInstanceType? = nil, dbParameterGroupIdentifier: String? = nil, dbStorageType: DbStorageType? = nil, deploymentType: DeploymentType? = nil, endpoint: String? = nil, id: String, influxAuthParametersSecretArn: String? = nil, logDeliveryConfiguration: LogDeliveryConfiguration? = nil, name: String, networkType: NetworkType? = nil, port: Int? = nil, publiclyAccessible: Bool? = nil, secondaryAvailabilityZone: String? = nil, status: Status? = nil, vpcSecurityGroupIds: [String]? 
= nil, vpcSubnetIds: [String]) { self.allocatedStorage = allocatedStorage self.arn = arn self.availabilityZone = availabilityZone @@ -1162,6 +1190,7 @@ extension TimestreamInfluxDB { self.influxAuthParametersSecretArn = influxAuthParametersSecretArn self.logDeliveryConfiguration = logDeliveryConfiguration self.name = name + self.networkType = networkType self.port = port self.publiclyAccessible = publiclyAccessible self.secondaryAvailabilityZone = secondaryAvailabilityZone @@ -1183,6 +1212,7 @@ extension TimestreamInfluxDB { case influxAuthParametersSecretArn = "influxAuthParametersSecretArn" case logDeliveryConfiguration = "logDeliveryConfiguration" case name = "name" + case networkType = "networkType" case port = "port" case publiclyAccessible = "publiclyAccessible" case secondaryAvailabilityZone = "secondaryAvailabilityZone" diff --git a/Sources/Soto/Services/TrustedAdvisor/TrustedAdvisor_api.swift b/Sources/Soto/Services/TrustedAdvisor/TrustedAdvisor_api.swift index c53919df39..44cb006140 100644 --- a/Sources/Soto/Services/TrustedAdvisor/TrustedAdvisor_api.swift +++ b/Sources/Soto/Services/TrustedAdvisor/TrustedAdvisor_api.swift @@ -65,6 +65,7 @@ public struct TrustedAdvisor: AWSService { serviceProtocol: .restjson, apiVersion: "2022-09-15", endpoint: endpoint, + serviceEndpoints: Self.serviceEndpoints, errorType: TrustedAdvisorErrorType.self, middleware: middleware, timeout: timeout, @@ -74,6 +75,12 @@ public struct TrustedAdvisor: AWSService { } + /// custom endpoints for regions + static var serviceEndpoints: [String: String] {[ + "fips-us-east-1": "trustedadvisor-fips.us-east-1.api.aws", + "fips-us-east-2": "trustedadvisor-fips.us-east-2.api.aws", + "fips-us-west-2": "trustedadvisor-fips.us-west-2.api.aws" + ]} diff --git a/Sources/Soto/Services/WorkSpaces/WorkSpaces_shapes.swift b/Sources/Soto/Services/WorkSpaces/WorkSpaces_shapes.swift index bc6eb7dd8f..9130888b89 100644 --- a/Sources/Soto/Services/WorkSpaces/WorkSpaces_shapes.swift +++ 
b/Sources/Soto/Services/WorkSpaces/WorkSpaces_shapes.swift @@ -6108,7 +6108,7 @@ extension WorkSpaces { public let rootVolumeEncryptionEnabled: Bool? /// The tags for the WorkSpace. public let tags: [Tag]? - /// The user name of the user for the WorkSpace. This user name must exist in the Directory Service directory for the WorkSpace. The reserved keyword, [UNDEFINED], is used when creating user-decoupled WorkSpaces. + /// The user name of the user for the WorkSpace. This user name must exist in the Directory Service directory for the WorkSpace. The username is not case-sensitive, but we recommend matching the case in the Directory Service directory to avoid potential incompatibilities. The reserved keyword, [UNDEFINED], is used when creating user-decoupled WorkSpaces. public let userName: String /// Indicates whether the data stored on the user volume is encrypted. public let userVolumeEncryptionEnabled: Bool? diff --git a/models/account.json b/models/account.json index 95a0177d57..09b328e862 100644 --- a/models/account.json +++ b/models/account.json @@ -123,6 +123,9 @@ "aws.auth#sigv4": { "name": "account" }, + "aws.endpoints#standardPartitionalEndpoints": { + "endpointPatternType": "service_region_dnsSuffix" + }, "aws.protocols#restJson1": {}, "smithy.api#cors": {}, "smithy.api#documentation": "

Operations for Amazon Web Services Account Management

", @@ -138,12 +141,6 @@ "smithy.rules#endpointRuleSet": { "version": "1.0", "parameters": { - "Region": { - "builtIn": "AWS::Region", - "required": false, - "documentation": "The AWS region used to dispatch the request.", - "type": "String" - }, "UseDualStack": { "builtIn": "AWS::UseDualStack", "required": true, @@ -163,6 +160,12 @@ "required": false, "documentation": "Override the endpoint used to send this request", "type": "String" + }, + "Region": { + "builtIn": "AWS::Region", + "required": false, + "documentation": "The AWS region used to dispatch the request.", + "type": "String" } }, "rules": [ @@ -194,263 +197,235 @@ "type": "error" }, { - "conditions": [ + "conditions": [], + "rules": [ { - "fn": "booleanEquals", - "argv": [ + "conditions": [ { - "ref": "UseDualStack" + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "endpoint": { + "url": { + "ref": "Endpoint" }, - true - ] + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ], - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", - "type": "error" - }, - { - "conditions": [], - "endpoint": { - "url": { - "ref": "Endpoint" - }, - "properties": {}, - "headers": {} - }, - "type": "endpoint" + "type": "tree" } ], "type": "tree" }, { - "conditions": [ - { - "fn": "isSet", - "argv": [ - { - "ref": "Region" - } - ] - } - ], + "conditions": [], "rules": [ { "conditions": [ { - "fn": "aws.partition", + "fn": "isSet", "argv": [ { "ref": "Region" } - ], - "assign": "PartitionResult" + ] } ], "rules": [ { "conditions": [ { - "fn": "stringEquals", + "fn": "aws.partition", "argv": [ { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "name" - ] - }, - "aws" - ] - }, - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - false - ] - }, - { - "fn": "booleanEquals", - 
"argv": [ - { - "ref": "UseDualStack" - }, - false - ] + "ref": "Region" + } + ], + "assign": "PartitionResult" } ], - "endpoint": { - "url": "https://account.us-east-1.amazonaws.com", - "properties": { - "authSchemes": [ - { - "name": "sigv4", - "signingName": "account", - "signingRegion": "us-east-1" - } - ] - }, - "headers": {} - }, - "type": "endpoint" - }, - { - "conditions": [ + "rules": [ { - "fn": "stringEquals", - "argv": [ + "conditions": [ { - "fn": "getAttr", + "fn": "booleanEquals", "argv": [ { - "ref": "PartitionResult" + "ref": "UseFIPS" }, - "name" + true ] }, - "aws-cn" - ] - }, - { - "fn": "booleanEquals", - "argv": [ { - "ref": "UseFIPS" - }, - false - ] - }, - { - "fn": "booleanEquals", - "argv": [ + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "rules": [ { - "ref": "UseDualStack" + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] + }, + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://account-fips.{PartitionResult#implicitGlobalRegion}.{PartitionResult#dualStackDnsSuffix}", + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "{PartitionResult#implicitGlobalRegion}" + } + ] + }, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" }, - false - ] - } - ], - "endpoint": { - "url": "https://account.cn-northwest-1.amazonaws.com.cn", - "properties": { - "authSchemes": [ { - "name": "sigv4", - "signingName": "account", - "signingRegion": "cn-northwest-1" + "conditions": [], + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" } - ] - }, - "headers": {} - }, - "type": "endpoint" - }, - { - "conditions": [ - { - 
"fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] + ], + "type": "tree" }, - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } + "ref": "UseFIPS" + }, + true ] }, { "fn": "booleanEquals", "argv": [ - true, { - "fn": "getAttr", + "ref": "UseDualStack" + }, + false + ] + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", "argv": [ { - "ref": "PartitionResult" + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] }, - "supportsDualStack" + true ] } - ] - } - ], - "rules": [ + ], + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://account-fips.{PartitionResult#implicitGlobalRegion}.{PartitionResult#dnsSuffix}", + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "{PartitionResult#implicitGlobalRegion}" + } + ] + }, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + }, { "conditions": [], - "endpoint": { - "url": "https://account-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" } ], "type": "tree" }, { - "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" - } - ], - "type": "tree" - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ + "conditions": [ { - "ref": "UseFIPS" + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + false + ] }, - true - ] - } - ], - "rules": [ - { - "conditions": [ { "fn": "booleanEquals", "argv": [ { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] + "ref": "UseDualStack" }, true ] @@ -458,127 +433,130 
@@ ], "rules": [ { - "conditions": [], - "endpoint": { - "url": "https://account-fips.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ], - "type": "tree" - }, - { - "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" - } - ], - "type": "tree" - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, + "conditions": [ { - "fn": "getAttr", + "fn": "booleanEquals", "argv": [ + true, { - "ref": "PartitionResult" - }, - "supportsDualStack" + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } ] } - ] - } - ], - "rules": [ + ], + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://account.{PartitionResult#implicitGlobalRegion}.{PartitionResult#dualStackDnsSuffix}", + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "{PartitionResult#implicitGlobalRegion}" + } + ] + }, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + }, { "conditions": [], - "endpoint": { - "url": "https://account.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" } ], "type": "tree" }, { "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" + "endpoint": { + "url": "https://account.{PartitionResult#implicitGlobalRegion}.{PartitionResult#dnsSuffix}", + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "{PartitionResult#implicitGlobalRegion}" + } + ] + }, + "headers": {} + }, + "type": "endpoint" } ], "type": "tree" - }, - { - "conditions": [], - "endpoint": { - "url": 
"https://account.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" } ], "type": "tree" + }, + { + "conditions": [], + "error": "Invalid Configuration: Missing Region", + "type": "error" } ], "type": "tree" - }, - { - "conditions": [], - "error": "Invalid Configuration: Missing Region", - "type": "error" } ] }, "smithy.rules#endpointTests": { "testCases": [ { - "documentation": "For region aws-global with FIPS disabled and DualStack disabled", + "documentation": "For custom endpoint with region not set and fips disabled", "expect": { "endpoint": { - "properties": { - "authSchemes": [ - { - "name": "sigv4", - "signingName": "account", - "signingRegion": "us-east-1" - } - ] - }, - "url": "https://account.us-east-1.amazonaws.com" + "url": "https://example.com" } }, "params": { - "Region": "aws-global", + "Endpoint": "https://example.com", + "UseFIPS": false + } + }, + { + "documentation": "For custom endpoint with fips enabled", + "expect": { + "error": "Invalid Configuration: FIPS and custom endpoint are not supported" + }, + "params": { + "Endpoint": "https://example.com", + "UseFIPS": true + } + }, + { + "documentation": "For custom endpoint with fips disabled and dualstack enabled", + "expect": { + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" + }, + "params": { + "Endpoint": "https://example.com", "UseFIPS": false, - "UseDualStack": false + "UseDualStack": true } }, { "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "us-east-1" + } + ] + }, "url": "https://account-fips.us-east-1.api.aws" } }, @@ -592,6 +570,14 @@ "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "us-east-1" + } + ] + }, "url": 
"https://account-fips.us-east-1.amazonaws.com" } }, @@ -605,6 +591,14 @@ "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "us-east-1" + } + ] + }, "url": "https://account.us-east-1.api.aws" } }, @@ -622,7 +616,6 @@ "authSchemes": [ { "name": "sigv4", - "signingName": "account", "signingRegion": "us-east-1" } ] @@ -637,75 +630,76 @@ } }, { - "documentation": "For region aws-cn-global with FIPS disabled and DualStack disabled", + "documentation": "For region cn-northwest-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { "properties": { "authSchemes": [ { "name": "sigv4", - "signingName": "account", "signingRegion": "cn-northwest-1" } ] }, - "url": "https://account.cn-northwest-1.amazonaws.com.cn" + "url": "https://account-fips.cn-northwest-1.api.amazonwebservices.com.cn" } }, "params": { - "Region": "aws-cn-global", - "UseFIPS": false, - "UseDualStack": false - } - }, - { - "documentation": "For region cn-north-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://account-fips.cn-north-1.api.amazonwebservices.com.cn" - } - }, - "params": { - "Region": "cn-north-1", + "Region": "cn-northwest-1", "UseFIPS": true, "UseDualStack": true } }, { - "documentation": "For region cn-north-1 with FIPS enabled and DualStack disabled", + "documentation": "For region cn-northwest-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://account-fips.cn-north-1.amazonaws.com.cn" + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "cn-northwest-1" + } + ] + }, + "url": "https://account-fips.cn-northwest-1.amazonaws.com.cn" } }, "params": { - "Region": "cn-north-1", + "Region": "cn-northwest-1", "UseFIPS": true, "UseDualStack": false } }, { - "documentation": "For region cn-north-1 with FIPS disabled and DualStack enabled", + 
"documentation": "For region cn-northwest-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://account.cn-north-1.api.amazonwebservices.com.cn" + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "cn-northwest-1" + } + ] + }, + "url": "https://account.cn-northwest-1.api.amazonwebservices.com.cn" } }, "params": { - "Region": "cn-north-1", + "Region": "cn-northwest-1", "UseFIPS": false, "UseDualStack": true } }, { - "documentation": "For region cn-north-1 with FIPS disabled and DualStack disabled", + "documentation": "For region cn-northwest-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { "properties": { "authSchemes": [ { "name": "sigv4", - "signingName": "account", "signingRegion": "cn-northwest-1" } ] @@ -714,59 +708,91 @@ } }, "params": { - "Region": "cn-north-1", + "Region": "cn-northwest-1", "UseFIPS": false, "UseDualStack": false } }, { - "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack enabled", + "documentation": "For region us-gov-west-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://account-fips.us-gov-east-1.api.aws" + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "us-gov-west-1" + } + ] + }, + "url": "https://account-fips.us-gov-west-1.api.aws" } }, "params": { - "Region": "us-gov-east-1", + "Region": "us-gov-west-1", "UseFIPS": true, "UseDualStack": true } }, { - "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-gov-west-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://account-fips.us-gov-east-1.amazonaws.com" + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "us-gov-west-1" + } + ] + }, + "url": "https://account-fips.us-gov-west-1.amazonaws.com" } }, "params": { - "Region": "us-gov-east-1", + "Region": "us-gov-west-1", "UseFIPS": true, 
"UseDualStack": false } }, { - "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-gov-west-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://account.us-gov-east-1.api.aws" + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "us-gov-west-1" + } + ] + }, + "url": "https://account.us-gov-west-1.api.aws" } }, "params": { - "Region": "us-gov-east-1", + "Region": "us-gov-west-1", "UseFIPS": false, "UseDualStack": true } }, { - "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-gov-west-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://account.us-gov-east-1.amazonaws.com" + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "us-gov-west-1" + } + ] + }, + "url": "https://account.us-gov-west-1.amazonaws.com" } }, "params": { - "Region": "us-gov-east-1", + "Region": "us-gov-west-1", "UseFIPS": false, "UseDualStack": false } @@ -786,6 +812,14 @@ "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "us-iso-east-1" + } + ] + }, "url": "https://account-fips.us-iso-east-1.c2s.ic.gov" } }, @@ -810,6 +844,14 @@ "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "us-iso-east-1" + } + ] + }, "url": "https://account.us-iso-east-1.c2s.ic.gov" } }, @@ -834,6 +876,14 @@ "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "us-isob-east-1" + } + ] + }, "url": "https://account-fips.us-isob-east-1.sc2s.sgov.gov" } 
}, @@ -858,6 +908,14 @@ "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "us-isob-east-1" + } + ] + }, "url": "https://account.us-isob-east-1.sc2s.sgov.gov" } }, @@ -868,54 +926,131 @@ } }, { - "documentation": "For custom endpoint with region set and fips disabled and dualstack disabled", + "documentation": "For region eu-isoe-west-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "eu-isoe-west-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region eu-isoe-west-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://example.com" + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "eu-isoe-west-1" + } + ] + }, + "url": "https://account-fips.eu-isoe-west-1.cloud.adc-e.uk" } }, "params": { - "Region": "us-east-1", + "Region": "eu-isoe-west-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region eu-isoe-west-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "eu-isoe-west-1", "UseFIPS": false, - "UseDualStack": false, - "Endpoint": "https://example.com" + "UseDualStack": true } }, { - "documentation": "For custom endpoint with region not set and fips disabled and dualstack disabled", + "documentation": "For region eu-isoe-west-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://example.com" + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "eu-isoe-west-1" + } + ] + }, + "url": "https://account.eu-isoe-west-1.cloud.adc-e.uk" } }, "params": { + "Region": "eu-isoe-west-1", "UseFIPS": 
false, - "UseDualStack": false, - "Endpoint": "https://example.com" + "UseDualStack": false } }, { - "documentation": "For custom endpoint with fips enabled and dualstack disabled", + "documentation": "For region us-isof-south-1 with FIPS enabled and DualStack enabled", "expect": { - "error": "Invalid Configuration: FIPS and custom endpoint are not supported" + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" }, "params": { - "Region": "us-east-1", + "Region": "us-isof-south-1", "UseFIPS": true, - "UseDualStack": false, - "Endpoint": "https://example.com" + "UseDualStack": true } }, { - "documentation": "For custom endpoint with fips disabled and dualstack enabled", + "documentation": "For region us-isof-south-1 with FIPS enabled and DualStack disabled", "expect": { - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "us-isof-south-1" + } + ] + }, + "url": "https://account-fips.us-isof-south-1.csp.hci.ic.gov" + } }, "params": { - "Region": "us-east-1", + "Region": "us-isof-south-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-isof-south-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-isof-south-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-isof-south-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "us-isof-south-1" + } + ] + }, + "url": "https://account.us-isof-south-1.csp.hci.ic.gov" + } + }, + "params": { + "Region": "us-isof-south-1", "UseFIPS": false, - "UseDualStack": true, - "Endpoint": "https://example.com" + "UseDualStack": false } }, { diff --git 
a/models/application-auto-scaling.json b/models/application-auto-scaling.json index c43f482884..152d7e04d7 100644 --- a/models/application-auto-scaling.json +++ b/models/application-auto-scaling.json @@ -3015,7 +3015,7 @@ } }, "traits": { - "smithy.api#documentation": "

\n Represents a predictive scaling policy configuration.\n

" + "smithy.api#documentation": "

\n Represents a predictive scaling policy configuration. Predictive scaling is supported on Amazon ECS services.\n

" } }, "com.amazonaws.applicationautoscaling#PredictiveScalingPredefinedLoadMetricSpecification": { diff --git a/models/appsync.json b/models/appsync.json index 96c9521267..728d9ec457 100644 --- a/models/appsync.json +++ b/models/appsync.json @@ -3545,7 +3545,7 @@ "type": { "target": "com.amazonaws.appsync#DataSourceType", "traits": { - "smithy.api#documentation": "

The type of the data source.

\n
    \n
  • \n

    \n AWS_LAMBDA: The data source is an Lambda function.

    \n
  • \n
  • \n

    \n AMAZON_DYNAMODB: The data source is an Amazon DynamoDB table.

    \n
  • \n
  • \n

    \n AMAZON_ELASTICSEARCH: The data source is an\n Amazon OpenSearch Service domain.

    \n
  • \n
  • \n

    \n AMAZON_OPENSEARCH_SERVICE: The data source is\n an Amazon OpenSearch Service domain.

    \n
  • \n
  • \n

    \n AMAZON_EVENTBRIDGE: The data source is an\n Amazon EventBridge configuration.

    \n
  • \n
  • \n

    \n NONE: There is no data source. Use this type\n when you want to invoke a GraphQL operation without connecting to a data source, such\n as when you're performing data transformation with resolvers or invoking a\n subscription from a mutation.

    \n
  • \n
  • \n

    \n HTTP: The data source is an HTTP\n endpoint.

    \n
  • \n
  • \n

    \n RELATIONAL_DATABASE: The data source is a\n relational database.

    \n
  • \n
" + "smithy.api#documentation": "

The type of the data source.

\n
    \n
  • \n

    \n AWS_LAMBDA: The data source is an Lambda function.

    \n
  • \n
  • \n

    \n AMAZON_DYNAMODB: The data source is an Amazon DynamoDB table.

    \n
  • \n
  • \n

    \n AMAZON_ELASTICSEARCH: The data source is an\n Amazon OpenSearch Service domain.

    \n
  • \n
  • \n

    \n AMAZON_OPENSEARCH_SERVICE: The data source is\n an Amazon OpenSearch Service domain.

    \n
  • \n
  • \n

    \n AMAZON_EVENTBRIDGE: The data source is an\n Amazon EventBridge configuration.

    \n
  • \n
  • \n

    \n AMAZON_BEDROCK_RUNTIME: The data source is the Amazon Bedrock runtime.

    \n
  • \n
  • \n

    \n NONE: There is no data source. Use this type\n when you want to invoke a GraphQL operation without connecting to a data source, such\n as when you're performing data transformation with resolvers or invoking a\n subscription from a mutation.

    \n
  • \n
  • \n

    \n HTTP: The data source is an HTTP\n endpoint.

    \n
  • \n
  • \n

    \n RELATIONAL_DATABASE: The data source is a\n relational database.

    \n
  • \n
" } }, "serviceRoleArn": { diff --git a/models/artifact.json b/models/artifact.json index 2cbf6b493b..90a54e27e5 100644 --- a/models/artifact.json +++ b/models/artifact.json @@ -2,20 +2,22 @@ "smithy": "2.0", "shapes": { "com.amazonaws.artifact#AcceptanceType": { - "type": "string", - "traits": { - "smithy.api#enum": [ - { - "documentation": "Do not require explicit click-through\nacceptance of the Term associated with\nthis Report.", - "value": "PASSTHROUGH", - "name": "PASSTHROUGH" - }, - { - "documentation": "Require explicit click-through acceptance of\nthe Term associated with this Report.", - "value": "EXPLICIT", - "name": "EXPLICIT" + "type": "enum", + "members": { + "PASSTHROUGH": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#documentation": "Do not require explicit click-through acceptance\nof the Term associated with this Report", + "smithy.api#enumValue": "PASSTHROUGH" } - ] + }, + "EXPLICIT": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#documentation": "Require explicit click-through acceptance of the\nTerm associated with this Report.", + "smithy.api#enumValue": "EXPLICIT" + } + } } }, "com.amazonaws.artifact#AccessDeniedException": { @@ -59,6 +61,40 @@ } ] }, + "com.amazonaws.artifact#AgreementTerms": { + "type": "list", + "member": { + "target": "com.amazonaws.artifact#LongStringAttribute" + }, + "traits": { + "smithy.api#length": { + "max": 10 + } + } + }, + "com.amazonaws.artifact#AgreementType": { + "type": "enum", + "members": { + "CUSTOM": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "CUSTOM" + } + }, + "DEFAULT": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "DEFAULT" + } + }, + "MODIFIED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "MODIFIED" + } + } + } + }, "com.amazonaws.artifact#Artifact": { "type": "service", "version": "2018-05-10", @@ -66,6 +102,9 @@ { "target": "com.amazonaws.artifact#AccountSettingsResource" }, + { + 
"target": "com.amazonaws.artifact#CustomerAgreementResource" + }, { "target": "com.amazonaws.artifact#ReportResource" }, @@ -781,6 +820,135 @@ "smithy.api#httpError": 409 } }, + "com.amazonaws.artifact#CustomerAgreementIdAttribute": { + "type": "string", + "traits": { + "smithy.api#pattern": "^customer-agreement-[a-zA-Z0-9]{16}$" + } + }, + "com.amazonaws.artifact#CustomerAgreementList": { + "type": "list", + "member": { + "target": "com.amazonaws.artifact#CustomerAgreementSummary" + } + }, + "com.amazonaws.artifact#CustomerAgreementResource": { + "type": "resource", + "operations": [ + { + "target": "com.amazonaws.artifact#ListCustomerAgreements" + } + ] + }, + "com.amazonaws.artifact#CustomerAgreementState": { + "type": "enum", + "members": { + "ACTIVE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ACTIVE" + } + }, + "CUSTOMER_TERMINATED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "CUSTOMER_TERMINATED" + } + }, + "AWS_TERMINATED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "AWS_TERMINATED" + } + } + } + }, + "com.amazonaws.artifact#CustomerAgreementSummary": { + "type": "structure", + "members": { + "name": { + "target": "com.amazonaws.artifact#LongStringAttribute", + "traits": { + "smithy.api#documentation": "

Name of the customer-agreement resource.

" + } + }, + "arn": { + "target": "com.amazonaws.artifact#LongStringAttribute", + "traits": { + "smithy.api#documentation": "

ARN of the customer-agreement resource.

" + } + }, + "id": { + "target": "com.amazonaws.artifact#CustomerAgreementIdAttribute", + "traits": { + "smithy.api#documentation": "

Identifier of the customer-agreement resource.

" + } + }, + "agreementArn": { + "target": "com.amazonaws.artifact#LongStringAttribute", + "traits": { + "smithy.api#documentation": "

ARN of the agreement resource the customer-agreement resource represents.

" + } + }, + "awsAccountId": { + "target": "com.amazonaws.artifact#ShortStringAttribute", + "traits": { + "smithy.api#documentation": "

AWS account Id that owns the resource.

" + } + }, + "organizationArn": { + "target": "com.amazonaws.artifact#LongStringAttribute", + "traits": { + "smithy.api#documentation": "

ARN of the organization that owns the resource.

" + } + }, + "effectiveStart": { + "target": "com.amazonaws.artifact#TimestampAttribute", + "traits": { + "smithy.api#documentation": "

Timestamp indicating when the agreement became effective.

" + } + }, + "effectiveEnd": { + "target": "com.amazonaws.artifact#TimestampAttribute", + "traits": { + "smithy.api#documentation": "

Timestamp indicating when the agreement was terminated.

" + } + }, + "state": { + "target": "com.amazonaws.artifact#CustomerAgreementState", + "traits": { + "smithy.api#documentation": "

State of the resource.

" + } + }, + "description": { + "target": "com.amazonaws.artifact#LongStringAttribute", + "traits": { + "smithy.api#documentation": "

Description of the resource.

" + } + }, + "acceptanceTerms": { + "target": "com.amazonaws.artifact#AgreementTerms", + "traits": { + "smithy.api#documentation": "

Terms required to accept the agreement resource.

" + } + }, + "terminateTerms": { + "target": "com.amazonaws.artifact#AgreementTerms", + "traits": { + "smithy.api#documentation": "

Terms required to terminate the customer-agreement resource.

" + } + }, + "type": { + "target": "com.amazonaws.artifact#AgreementType", + "traits": { + "smithy.api#documentation": "

Type of the customer-agreement resource.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Summary for customer-agreement resource.

" + } + }, "com.amazonaws.artifact#GetAccountSettings": { "type": "operation", "input": { @@ -890,8 +1058,8 @@ "title": "Invoke GetReport operation on the latest version of a specific report", "documentation": "The GetReport operation is invoked on a reportId and on a optional version.\n Callers must provide a termToken, which is provided by the GetTermForReport\n operation. If callers do not provide a version, it will default to the\n report's latest version", "input": { - "reportId": "report-1hVFddebtfDNJAUf", - "termToken": "term-token-gPFEGk7CF4wS901w7ppYclt7" + "reportId": "report-abcdef0123456789", + "termToken": "term-token-abcdefghijklm01234567890" }, "output": { "documentPresignedUrl": "" @@ -945,19 +1113,19 @@ }, "output": { "reportDetails": { - "arn": "arn:aws:artifact:us-east-1::report/report-bqhUJF3FrQZsMJpb:1", + "arn": "arn:aws:artifact:us-east-1::report/report-abcdef0123456789:1", "category": "Artifact Category", "companyName": "AWS", "createdAt": "2022-05-27T23:17:00.343940Z", "description": "Description of report", - "id": "report-bqhUJF3FrQZsMJpb", + "id": "report-abcdef0123456789", "name": "Name of report", "periodEnd": "2022-04-01T20:32:04Z", "periodStart": "2022-04-01T20:32:04Z", "productName": "Product of report", "series": "Artifact Series", "state": "PUBLISHED", - "termArn": "arn:aws:artifact:us-east-1::term/term-gLJGG12NyPtYcmtu:1", + "termArn": "arn:aws:artifact:us-east-1::term/term-abcdef0123456789:1", "version": 1 } } @@ -1095,10 +1263,10 @@ "title": "Invoke GetTermForReport operation on the latest version of a specific report", "documentation": "The GetTermForReport operation is invoked on a reportId and on a optional version.\n If callers do not provide a version, it will default to the report's latest version.", "input": { - "reportId": "report-bqhUJF3FrQZsMJpb" + "reportId": "report-abcdef0123456789" }, "output": { - "termToken": "term-token-gPFEGk7CF4wS901w7ppYclt7", + "termToken": "term-token-abcdefghijklm01234567890", 
"documentPresignedUrl": "" } } @@ -1182,6 +1350,116 @@ "smithy.api#retryable": {} } }, + "com.amazonaws.artifact#ListCustomerAgreements": { + "type": "operation", + "input": { + "target": "com.amazonaws.artifact#ListCustomerAgreementsRequest" + }, + "output": { + "target": "com.amazonaws.artifact#ListCustomerAgreementsResponse" + }, + "errors": [ + { + "target": "com.amazonaws.artifact#AccessDeniedException" + }, + { + "target": "com.amazonaws.artifact#InternalServerException" + }, + { + "target": "com.amazonaws.artifact#ThrottlingException" + }, + { + "target": "com.amazonaws.artifact#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

List active customer-agreements applicable to calling identity.

", + "smithy.api#examples": [ + { + "title": "Invoke ListCustomerAgreements operation", + "documentation": "The ListCustomerAgreements operation returns a collection of customer-agreement resources in the ACTIVE state for the calling credential.", + "input": {}, + "output": { + "customerAgreements": [ + { + "name": "Name of agreement", + "arn": "arn:aws:artifact::111111111111:customer-agreement/customer-agreement-abcdef0123456789", + "id": "customer-agreement-abcdef0123456789", + "agreementArn": "arn:aws:artifact:::agreement/agreement-abcdef0123456789", + "awsAccountId": "111111111111", + "description": "Description of agreement", + "effectiveStart": "2022-04-01T20:32:04Z", + "type": "DEFAULT", + "state": "ACTIVE", + "acceptanceTerms": [ + "terms acknowledged when agreement was accepted" + ], + "terminateTerms": [ + "terms that must be acknowledged to terminate this agreement" + ] + } + ], + "nextToken": "gPFEGk7CF4wS901w7ppYclt7gPFEGk7CF4wS901w7ppYclt7gPFEGk7CF4wS901w7ppYclt7" + } + } + ], + "smithy.api#http": { + "code": 200, + "method": "GET", + "uri": "/v1/customer-agreement/list" + }, + "smithy.api#paginated": { + "inputToken": "nextToken", + "outputToken": "nextToken", + "pageSize": "maxResults", + "items": "customerAgreements" + }, + "smithy.api#readonly": {} + } + }, + "com.amazonaws.artifact#ListCustomerAgreementsRequest": { + "type": "structure", + "members": { + "maxResults": { + "target": "com.amazonaws.artifact#MaxResultsAttribute", + "traits": { + "smithy.api#documentation": "

Maximum number of resources to return in the paginated response.

", + "smithy.api#httpQuery": "maxResults" + } + }, + "nextToken": { + "target": "com.amazonaws.artifact#NextTokenAttribute", + "traits": { + "smithy.api#documentation": "

Pagination token to request the next page of resources.

", + "smithy.api#httpQuery": "nextToken" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.artifact#ListCustomerAgreementsResponse": { + "type": "structure", + "members": { + "customerAgreements": { + "target": "com.amazonaws.artifact#CustomerAgreementList", + "traits": { + "smithy.api#documentation": "

List of customer-agreement resources.

", + "smithy.api#required": {} + } + }, + "nextToken": { + "target": "com.amazonaws.artifact#NextTokenAttribute", + "traits": { + "smithy.api#documentation": "

Pagination token to request the next page of resources.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.artifact#ListReports": { "type": "operation", "input": { @@ -1220,11 +1498,11 @@ "output": { "reports": [ { - "arn": "arn:aws:artifact:us-east-1::report/report-bqhUJF3FrQZsMJpb", + "arn": "arn:aws:artifact:us-east-1::report/report-abcdef0123456789", "category": "Artifact Category", "companyName": "AWS", "description": "Description of report", - "id": "report-bqhUJF3FrQZsMJpb", + "id": "report-abcdef0123456789", "name": "Name of report", "periodEnd": "2022-04-01T20:32:04Z", "periodStart": "2022-04-01T20:32:04Z", @@ -1323,37 +1601,37 @@ } }, "com.amazonaws.artifact#NotificationSubscriptionStatus": { - "type": "string", - "traits": { - "smithy.api#enum": [ - { - "value": "SUBSCRIBED", - "name": "SUBSCRIBED", - "documentation": "The account is subscribed for notification." - }, - { - "value": "NOT_SUBSCRIBED", - "name": "NOT_SUBSCRIBED", - "documentation": "The account is not subscribed for notification." + "type": "enum", + "members": { + "SUBSCRIBED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "SUBSCRIBED" } - ] + }, + "NOT_SUBSCRIBED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "NOT_SUBSCRIBED" + } + } } }, "com.amazonaws.artifact#PublishedState": { - "type": "string", - "traits": { - "smithy.api#enum": [ - { - "value": "PUBLISHED", - "name": "PUBLISHED", - "documentation": "The resource is published for consumption." - }, - { - "value": "UNPUBLISHED", - "name": "UNPUBLISHED", - "documentation": "The resource is not published for consumption." 
+ "type": "enum", + "members": { + "PUBLISHED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "PUBLISHED" } - ] + }, + "UNPUBLISHED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "UNPUBLISHED" + } + } } }, "com.amazonaws.artifact#PutAccountSettings": { @@ -1850,26 +2128,32 @@ } }, "com.amazonaws.artifact#UploadState": { - "type": "string", - "traits": { - "smithy.api#enum": [ - { - "value": "PROCESSING", - "name": "PROCESSING" - }, - { - "value": "COMPLETE", - "name": "COMPLETE" - }, - { - "value": "FAILED", - "name": "FAILED" - }, - { - "value": "FAULT", - "name": "FAULT" + "type": "enum", + "members": { + "PROCESSING": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "PROCESSING" } - ] + }, + "COMPLETE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "COMPLETE" + } + }, + "FAILED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "FAILED" + } + }, + "FAULT": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "FAULT" + } + } } }, "com.amazonaws.artifact#ValidationException": { diff --git a/models/backup.json b/models/backup.json index b99cf3683b..0b959d5a08 100644 --- a/models/backup.json +++ b/models/backup.json @@ -743,6 +743,12 @@ "traits": { "smithy.api#documentation": "

The timezone in which the schedule expression is set. By default, \n ScheduleExpressions are in UTC. You can modify this to a specified timezone.

" } + }, + "IndexActions": { + "target": "com.amazonaws.backup#IndexActions", + "traits": { + "smithy.api#documentation": "

IndexActions is an array you use to specify how backup data should \n be indexed.

\n

Each BackupRule can have 0 or 1 IndexAction, as each backup can have up \n to one index associated with it.

\n

Within the array is ResourceType. Only one will be accepted for each BackupRule.

" + } } }, "traits": { @@ -813,6 +819,12 @@ "traits": { "smithy.api#documentation": "

The timezone in which the schedule expression is set. By default, \n ScheduleExpressions are in UTC. You can modify this to a specified timezone.

" } + }, + "IndexActions": { + "target": "com.amazonaws.backup#IndexActions", + "traits": { + "smithy.api#documentation": "

There can be up to one IndexAction in each BackupRule, as each backup \n can have 0 or 1 backup index associated with it.

\n

Within the array is ResourceTypes. Only 1 resource type will \n be accepted for each BackupRule. Valid values:

\n
    \n
  • \n

    \n EBS for Amazon Elastic Block Store

    \n
  • \n
  • \n

    \n S3 for Amazon Simple Storage Service (Amazon S3)

    \n
  • \n
" + } } }, "traits": { @@ -2850,6 +2862,9 @@ { "target": "com.amazonaws.backup#GetLegalHold" }, + { + "target": "com.amazonaws.backup#GetRecoveryPointIndexDetails" + }, { "target": "com.amazonaws.backup#GetRecoveryPointRestoreMetadata" }, @@ -2898,6 +2913,9 @@ { "target": "com.amazonaws.backup#ListFrameworks" }, + { + "target": "com.amazonaws.backup#ListIndexedRecoveryPoints" + }, { "target": "com.amazonaws.backup#ListLegalHolds" }, @@ -2982,6 +3000,9 @@ { "target": "com.amazonaws.backup#UpdateGlobalSettings" }, + { + "target": "com.amazonaws.backup#UpdateRecoveryPointIndexSettings" + }, { "target": "com.amazonaws.backup#UpdateRecoveryPointLifecycle" }, @@ -5475,6 +5496,18 @@ "traits": { "smithy.api#documentation": "

The type of vault in which the described recovery point is stored.

" } + }, + "IndexStatus": { + "target": "com.amazonaws.backup#IndexStatus", + "traits": { + "smithy.api#documentation": "

This is the current status for the backup index associated with the specified recovery\n point.

\n

Statuses are: PENDING | ACTIVE | FAILED |\n DELETING\n

\n

A recovery point with an index that has the status of ACTIVE can be\n included in a search.

" + } + }, + "IndexStatusMessage": { + "target": "com.amazonaws.backup#string", + "traits": { + "smithy.api#documentation": "

A string in the form of a detailed message explaining the status of a backup index\n associated with the recovery point.

" + } } }, "traits": { @@ -6717,6 +6750,124 @@ "smithy.api#output": {} } }, + "com.amazonaws.backup#GetRecoveryPointIndexDetails": { + "type": "operation", + "input": { + "target": "com.amazonaws.backup#GetRecoveryPointIndexDetailsInput" + }, + "output": { + "target": "com.amazonaws.backup#GetRecoveryPointIndexDetailsOutput" + }, + "errors": [ + { + "target": "com.amazonaws.backup#InvalidParameterValueException" + }, + { + "target": "com.amazonaws.backup#MissingParameterValueException" + }, + { + "target": "com.amazonaws.backup#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.backup#ServiceUnavailableException" + } + ], + "traits": { + "smithy.api#documentation": "

This operation returns the metadata and details specific to \n the backup index associated with the specified recovery point.

", + "smithy.api#http": { + "method": "GET", + "uri": "/backup-vaults/{BackupVaultName}/recovery-points/{RecoveryPointArn}/index", + "code": 200 + }, + "smithy.api#idempotent": {} + } + }, + "com.amazonaws.backup#GetRecoveryPointIndexDetailsInput": { + "type": "structure", + "members": { + "BackupVaultName": { + "target": "com.amazonaws.backup#BackupVaultName", + "traits": { + "smithy.api#documentation": "

The name of a logical container where backups are stored. Backup vaults are identified\n by names that are unique to the account used to create them and the Region where they are\n created.

\n

Accepted characters include lowercase letters, numbers, and hyphens.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "RecoveryPointArn": { + "target": "com.amazonaws.backup#ARN", + "traits": { + "smithy.api#documentation": "

An ARN that uniquely identifies a recovery point; for example,\n arn:aws:backup:us-east-1:123456789012:recovery-point:1EB3B5E7-9EB0-435A-A80B-108B488B0D45.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.backup#GetRecoveryPointIndexDetailsOutput": { + "type": "structure", + "members": { + "RecoveryPointArn": { + "target": "com.amazonaws.backup#ARN", + "traits": { + "smithy.api#documentation": "

An ARN that uniquely identifies a recovery point; for example,\n arn:aws:backup:us-east-1:123456789012:recovery-point:1EB3B5E7-9EB0-435A-A80B-108B488B0D45.

" + } + }, + "BackupVaultArn": { + "target": "com.amazonaws.backup#ARN", + "traits": { + "smithy.api#documentation": "

An ARN that uniquely identifies the backup vault where the recovery \n point index is stored.

\n

For example,\n arn:aws:backup:us-east-1:123456789012:backup-vault:aBackupVault.

" + } + }, + "SourceResourceArn": { + "target": "com.amazonaws.backup#ARN", + "traits": { + "smithy.api#documentation": "

A string of the Amazon Resource Name (ARN) that uniquely identifies \n the source resource.

" + } + }, + "IndexCreationDate": { + "target": "com.amazonaws.backup#timestamp", + "traits": { + "smithy.api#documentation": "

The date and time that a backup index was created, in Unix format and Coordinated\n Universal Time (UTC). The value of CreationDate is accurate to milliseconds.\n For example, the value 1516925490.087 represents Friday, January 26, 2018 12:11:30.087\n AM.

" + } + }, + "IndexDeletionDate": { + "target": "com.amazonaws.backup#timestamp", + "traits": { + "smithy.api#documentation": "

The date and time that a backup index was deleted, in Unix format and Coordinated\n Universal Time (UTC). The value of IndexDeletionDate is accurate to milliseconds.\n For example, the value 1516925490.087 represents Friday, January 26, 2018 12:11:30.087\n AM.

" + } + }, + "IndexCompletionDate": { + "target": "com.amazonaws.backup#timestamp", + "traits": { + "smithy.api#documentation": "

The date and time that a backup index finished creation, in Unix format and Coordinated\n Universal Time (UTC). The value of IndexCompletionDate is accurate to milliseconds.\n For example, the value 1516925490.087 represents Friday, January 26, 2018 12:11:30.087\n AM.

" + } + }, + "IndexStatus": { + "target": "com.amazonaws.backup#IndexStatus", + "traits": { + "smithy.api#documentation": "

This is the current status for the backup index associated \n with the specified recovery point.

\n

Statuses are: PENDING | ACTIVE | FAILED | DELETING\n

\n

A recovery point with an index that has the status of ACTIVE \n can be included in a search.

" + } + }, + "IndexStatusMessage": { + "target": "com.amazonaws.backup#string", + "traits": { + "smithy.api#documentation": "

A detailed message explaining the status of a backup index associated \n with the recovery point.

" + } + }, + "TotalItemsIndexed": { + "target": "com.amazonaws.backup#Long", + "traits": { + "smithy.api#documentation": "

Count of items within the backup index associated with the \n recovery point.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.backup#GetRecoveryPointRestoreMetadata": { "type": "operation", "input": { @@ -7133,6 +7284,140 @@ "com.amazonaws.backup#IAMRoleArn": { "type": "string" }, + "com.amazonaws.backup#Index": { + "type": "enum", + "members": { + "ENABLED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ENABLED" + } + }, + "DISABLED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "DISABLED" + } + } + } + }, + "com.amazonaws.backup#IndexAction": { + "type": "structure", + "members": { + "ResourceTypes": { + "target": "com.amazonaws.backup#ResourceTypes", + "traits": { + "smithy.api#documentation": "

0 or 1 index action will be accepted for each BackupRule.

\n

Valid values:

\n
    \n
  • \n

    \n EBS for Amazon Elastic Block Store

    \n
  • \n
  • \n

    \n S3 for Amazon Simple Storage Service (Amazon S3)

    \n
  • \n
" + } + } + }, + "traits": { + "smithy.api#documentation": "

This is an optional array within a BackupRule.

\n

IndexAction consists of one ResourceTypes.

" + } + }, + "com.amazonaws.backup#IndexActions": { + "type": "list", + "member": { + "target": "com.amazonaws.backup#IndexAction" + } + }, + "com.amazonaws.backup#IndexStatus": { + "type": "enum", + "members": { + "PENDING": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "PENDING" + } + }, + "ACTIVE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ACTIVE" + } + }, + "FAILED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "FAILED" + } + }, + "DELETING": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "DELETING" + } + } + } + }, + "com.amazonaws.backup#IndexedRecoveryPoint": { + "type": "structure", + "members": { + "RecoveryPointArn": { + "target": "com.amazonaws.backup#ARN", + "traits": { + "smithy.api#documentation": "

An ARN that uniquely identifies a recovery point; for example,\n arn:aws:backup:us-east-1:123456789012:recovery-point:1EB3B5E7-9EB0-435A-A80B-108B488B0D45\n

" + } + }, + "SourceResourceArn": { + "target": "com.amazonaws.backup#ARN", + "traits": { + "smithy.api#documentation": "

A string of the Amazon Resource Name (ARN) that uniquely identifies \n the source resource.

" + } + }, + "IamRoleArn": { + "target": "com.amazonaws.backup#ARN", + "traits": { + "smithy.api#documentation": "

This specifies the IAM role ARN used for this operation.

\n

For example, arn:aws:iam::123456789012:role/S3Access

" + } + }, + "BackupCreationDate": { + "target": "com.amazonaws.backup#timestamp", + "traits": { + "smithy.api#documentation": "

The date and time that a backup was created, in Unix format and Coordinated\n Universal Time (UTC). The value of CreationDate is accurate to milliseconds.\n For example, the value 1516925490.087 represents Friday, January 26, 2018 12:11:30.087\n AM.

" + } + }, + "ResourceType": { + "target": "com.amazonaws.backup#ResourceType", + "traits": { + "smithy.api#documentation": "

The resource type of the indexed recovery point.

\n
    \n
  • \n

    \n EBS for Amazon Elastic Block Store

    \n
  • \n
  • \n

    \n S3 for Amazon Simple Storage Service (Amazon S3)

    \n
  • \n
" + } + }, + "IndexCreationDate": { + "target": "com.amazonaws.backup#timestamp", + "traits": { + "smithy.api#documentation": "

The date and time that a backup index was created, in Unix format and Coordinated\n Universal Time (UTC). The value of CreationDate is accurate to milliseconds.\n For example, the value 1516925490.087 represents Friday, January 26, 2018 12:11:30.087\n AM.

" + } + }, + "IndexStatus": { + "target": "com.amazonaws.backup#IndexStatus", + "traits": { + "smithy.api#documentation": "

This is the current status for the backup index associated \n with the specified recovery point.

\n

Statuses are: PENDING | ACTIVE | FAILED | DELETING\n

\n

A recovery point with an index that has the status of ACTIVE \n can be included in a search.

" + } + }, + "IndexStatusMessage": { + "target": "com.amazonaws.backup#string", + "traits": { + "smithy.api#documentation": "

A string in the form of a detailed message explaining the status of a backup index associated \n with the recovery point.

" + } + }, + "BackupVaultArn": { + "target": "com.amazonaws.backup#ARN", + "traits": { + "smithy.api#documentation": "

An ARN that uniquely identifies the backup vault where the recovery \n point index is stored.

\n

For example,\n arn:aws:backup:us-east-1:123456789012:backup-vault:aBackupVault.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

This is a recovery point that has an associated backup index.

\n

Only recovery points with a backup index can be \n included in a search.

" + } + }, + "com.amazonaws.backup#IndexedRecoveryPointList": { + "type": "list", + "member": { + "target": "com.amazonaws.backup#IndexedRecoveryPoint" + } + }, "com.amazonaws.backup#InvalidParameterValueException": { "type": "structure", "members": { @@ -8422,6 +8707,118 @@ "smithy.api#output": {} } }, + "com.amazonaws.backup#ListIndexedRecoveryPoints": { + "type": "operation", + "input": { + "target": "com.amazonaws.backup#ListIndexedRecoveryPointsInput" + }, + "output": { + "target": "com.amazonaws.backup#ListIndexedRecoveryPointsOutput" + }, + "errors": [ + { + "target": "com.amazonaws.backup#InvalidParameterValueException" + }, + { + "target": "com.amazonaws.backup#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.backup#ServiceUnavailableException" + } + ], + "traits": { + "smithy.api#documentation": "

This operation returns a list of recovery points that have an \n associated index, belonging to the specified account.

\n

Optional parameters you can include are: MaxResults; \n NextToken; SourceResourceArns; CreatedBefore; CreatedAfter; \n and ResourceType.

", + "smithy.api#http": { + "method": "GET", + "uri": "/indexes/recovery-point", + "code": 200 + }, + "smithy.api#idempotent": {}, + "smithy.api#paginated": { + "inputToken": "NextToken", + "outputToken": "NextToken", + "items": "IndexedRecoveryPoints", + "pageSize": "MaxResults" + } + } + }, + "com.amazonaws.backup#ListIndexedRecoveryPointsInput": { + "type": "structure", + "members": { + "NextToken": { + "target": "com.amazonaws.backup#string", + "traits": { + "smithy.api#documentation": "

The next item following a partial list of returned recovery points.

\n

For example, if a request\n is made to return MaxResults number of indexed recovery points, NextToken\n allows you to return more items in your list starting at the location pointed to by the\n next token.

", + "smithy.api#httpQuery": "nextToken" + } + }, + "MaxResults": { + "target": "com.amazonaws.backup#MaxResults", + "traits": { + "smithy.api#documentation": "

The maximum number of resource list items to be returned.

", + "smithy.api#httpQuery": "maxResults" + } + }, + "SourceResourceArn": { + "target": "com.amazonaws.backup#ARN", + "traits": { + "smithy.api#documentation": "

A string of the Amazon Resource Name (ARN) that uniquely identifies \n the source resource.

", + "smithy.api#httpQuery": "sourceResourceArn" + } + }, + "CreatedBefore": { + "target": "com.amazonaws.backup#timestamp", + "traits": { + "smithy.api#documentation": "

Returns only indexed recovery points that were created before the \n specified date.

", + "smithy.api#httpQuery": "createdBefore" + } + }, + "CreatedAfter": { + "target": "com.amazonaws.backup#timestamp", + "traits": { + "smithy.api#documentation": "

Returns only indexed recovery points that were created after the \n specified date.

", + "smithy.api#httpQuery": "createdAfter" + } + }, + "ResourceType": { + "target": "com.amazonaws.backup#ResourceType", + "traits": { + "smithy.api#documentation": "

Returns a list of indexed recovery points for the specified \n resource type(s).

\n

Accepted values include:

\n
    \n
  • \n

    \n EBS for Amazon Elastic Block Store

    \n
  • \n
  • \n

    \n S3 for Amazon Simple Storage Service (Amazon S3)

    \n
  • \n
", + "smithy.api#httpQuery": "resourceType" + } + }, + "IndexStatus": { + "target": "com.amazonaws.backup#IndexStatus", + "traits": { + "smithy.api#documentation": "

Include this parameter to filter the returned list by \n the indicated statuses.

\n

Accepted values: PENDING | ACTIVE | FAILED | DELETING\n

\n

A recovery point with an index that has the status of ACTIVE \n can be included in a search.

", + "smithy.api#httpQuery": "indexStatus" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.backup#ListIndexedRecoveryPointsOutput": { + "type": "structure", + "members": { + "IndexedRecoveryPoints": { + "target": "com.amazonaws.backup#IndexedRecoveryPointList", + "traits": { + "smithy.api#documentation": "

This is a list of recovery points that have an \n associated index, belonging to the specified account.

" + } + }, + "NextToken": { + "target": "com.amazonaws.backup#string", + "traits": { + "smithy.api#documentation": "

The next item following a partial list of returned recovery points.

\n

For example, if a request\n is made to return MaxResults number of indexed recovery points, NextToken\n allows you to return more items in your list starting at the location pointed to by the\n next token.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.backup#ListLegalHolds": { "type": "operation", "input": { @@ -10316,6 +10713,18 @@ "traits": { "smithy.api#documentation": "

The type of vault in which the described recovery point is stored.

" } + }, + "IndexStatus": { + "target": "com.amazonaws.backup#IndexStatus", + "traits": { + "smithy.api#documentation": "

This is the current status for the backup index associated \n with the specified recovery point.

\n

Statuses are: PENDING | ACTIVE | FAILED |\n DELETING\n

\n

A recovery point with an index that has the status of ACTIVE \n can be included in a search.

" + } + }, + "IndexStatusMessage": { + "target": "com.amazonaws.backup#string", + "traits": { + "smithy.api#documentation": "

A string in the form of a detailed message explaining the status of a backup index associated \n with the recovery point.

" + } } }, "traits": { @@ -10397,6 +10806,18 @@ "traits": { "smithy.api#documentation": "

The type of vault in which the described recovery point is \n stored.

" } + }, + "IndexStatus": { + "target": "com.amazonaws.backup#IndexStatus", + "traits": { + "smithy.api#documentation": "

This is the current status for the backup index associated \n with the specified recovery point.

\n

Statuses are: PENDING | ACTIVE | FAILED | DELETING\n

\n

A recovery point with an index that has the status of ACTIVE \n can be included in a search.

" + } + }, + "IndexStatusMessage": { + "target": "com.amazonaws.backup#string", + "traits": { + "smithy.api#documentation": "

A string in the form of a detailed message explaining the status of a backup index\n associated with the recovery point.

" + } } }, "traits": { @@ -11851,6 +12272,12 @@ "traits": { "smithy.api#documentation": "

The backup option for a selected resource. This option is only available for\n Windows Volume Shadow Copy Service (VSS) backup jobs.

\n

Valid values: Set to \"WindowsVSS\":\"enabled\" to enable the\n WindowsVSS backup option and create a Windows VSS backup. Set to\n \"WindowsVSS\":\"disabled\" to create a regular backup. The\n WindowsVSS option is not enabled by default.

" } + }, + "Index": { + "target": "com.amazonaws.backup#Index", + "traits": { + "smithy.api#documentation": "

Include this parameter to enable index creation if your backup \n job has a resource type that supports backup indexes.

\n

Resource types that support backup indexes include:

\n
    \n
  • \n

    \n EBS for Amazon Elastic Block Store

    \n
  • \n
  • \n

    \n S3 for Amazon Simple Storage Service (Amazon S3)

    \n
  • \n
\n

Index can have 1 of 2 possible values, either ENABLED or \n DISABLED.

\n

To create a backup index for an eligible ACTIVE recovery point \n that does not yet have a backup index, set value to ENABLED.

\n

To delete a backup index, set value to DISABLED.

" + } } }, "traits": { @@ -12621,6 +13048,110 @@ "smithy.api#input": {} } }, + "com.amazonaws.backup#UpdateRecoveryPointIndexSettings": { + "type": "operation", + "input": { + "target": "com.amazonaws.backup#UpdateRecoveryPointIndexSettingsInput" + }, + "output": { + "target": "com.amazonaws.backup#UpdateRecoveryPointIndexSettingsOutput" + }, + "errors": [ + { + "target": "com.amazonaws.backup#InvalidParameterValueException" + }, + { + "target": "com.amazonaws.backup#InvalidRequestException" + }, + { + "target": "com.amazonaws.backup#MissingParameterValueException" + }, + { + "target": "com.amazonaws.backup#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.backup#ServiceUnavailableException" + } + ], + "traits": { + "smithy.api#documentation": "

This operation updates the settings of a recovery point index.

\n

Required: BackupVaultName, RecoveryPointArn, and IAMRoleArn

", + "smithy.api#http": { + "method": "POST", + "uri": "/backup-vaults/{BackupVaultName}/recovery-points/{RecoveryPointArn}/index", + "code": 200 + }, + "smithy.api#idempotent": {} + } + }, + "com.amazonaws.backup#UpdateRecoveryPointIndexSettingsInput": { + "type": "structure", + "members": { + "BackupVaultName": { + "target": "com.amazonaws.backup#BackupVaultName", + "traits": { + "smithy.api#documentation": "

The name of a logical container where backups are stored. Backup vaults are identified\n by names that are unique to the account used to create them and the Region where they are\n created.

\n

Accepted characters include lowercase letters, numbers, and hyphens.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "RecoveryPointArn": { + "target": "com.amazonaws.backup#ARN", + "traits": { + "smithy.api#documentation": "

An ARN that uniquely identifies a recovery point; for example,\n arn:aws:backup:us-east-1:123456789012:recovery-point:1EB3B5E7-9EB0-435A-A80B-108B488B0D45.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "IamRoleArn": { + "target": "com.amazonaws.backup#IAMRoleArn", + "traits": { + "smithy.api#documentation": "

This specifies the IAM role ARN used for this operation.

\n

For example, arn:aws:iam::123456789012:role/S3Access

" + } + }, + "Index": { + "target": "com.amazonaws.backup#Index", + "traits": { + "smithy.api#documentation": "

Index can have 1 of 2 possible values, either ENABLED or \n DISABLED.

\n

To create a backup index for an eligible ACTIVE recovery point \n that does not yet have a backup index, set value to ENABLED.

\n

To delete a backup index, set value to DISABLED.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.backup#UpdateRecoveryPointIndexSettingsOutput": { + "type": "structure", + "members": { + "BackupVaultName": { + "target": "com.amazonaws.backup#BackupVaultName", + "traits": { + "smithy.api#documentation": "

The name of a logical container where backups are stored. Backup vaults are identified\n by names that are unique to the account used to create them and the Region where they are\n created.

" + } + }, + "RecoveryPointArn": { + "target": "com.amazonaws.backup#ARN", + "traits": { + "smithy.api#documentation": "

An ARN that uniquely identifies a recovery point; for example,\n arn:aws:backup:us-east-1:123456789012:recovery-point:1EB3B5E7-9EB0-435A-A80B-108B488B0D45.

" + } + }, + "IndexStatus": { + "target": "com.amazonaws.backup#IndexStatus", + "traits": { + "smithy.api#documentation": "

This is the current status for the backup index associated \n with the specified recovery point.

\n

Statuses are: PENDING | ACTIVE | FAILED | DELETING\n

\n

A recovery point with an index that has the status of ACTIVE \n can be included in a search.

" + } + }, + "Index": { + "target": "com.amazonaws.backup#Index", + "traits": { + "smithy.api#documentation": "

Index can have 1 of 2 possible values, either ENABLED or\n DISABLED.

\n

A value of ENABLED means a backup index for an eligible ACTIVE\n recovery point has been created.

\n

A value of DISABLED means a backup index was deleted.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.backup#UpdateRecoveryPointLifecycle": { "type": "operation", "input": { diff --git a/models/backupsearch.json b/models/backupsearch.json new file mode 100644 index 0000000000..a9bd5537b7 --- /dev/null +++ b/models/backupsearch.json @@ -0,0 +1,2820 @@ +{ + "smithy": "2.0", + "shapes": { + "com.amazonaws.backupsearch#AccessDeniedException": { + "type": "structure", + "members": { + "message": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

User does not have sufficient access to perform this action.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

You do not have sufficient access to perform this action.

", + "smithy.api#error": "client", + "smithy.api#httpError": 403 + } + }, + "com.amazonaws.backupsearch#BackupCreationTimeFilter": { + "type": "structure", + "members": { + "CreatedAfter": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

This timestamp includes recovery points only \n created after the specified time.

" + } + }, + "CreatedBefore": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

This timestamp includes recovery points only \n created before the specified time.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

This filters by recovery points within the CreatedAfter \n and CreatedBefore timestamps.

" + } + }, + "com.amazonaws.backupsearch#ConflictException": { + "type": "structure", + "members": { + "message": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

Updating or deleting a resource can cause an inconsistent state.

", + "smithy.api#required": {} + } + }, + "resourceId": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

Identifier of the resource affected.

", + "smithy.api#required": {} + } + }, + "resourceType": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

Type of the resource affected.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

This exception occurs when a conflict with a previous successful\n operation is detected. This generally occurs when the previous \n operation did not have time to propagate to the host serving the \n current request.

\n

A retry (with appropriate backoff logic) is the recommended \n response to this exception.

", + "smithy.api#error": "client", + "smithy.api#httpError": 409 + } + }, + "com.amazonaws.backupsearch#CryoBackupSearchService": { + "type": "service", + "version": "2018-05-10", + "operations": [ + { + "target": "com.amazonaws.backupsearch#ListSearchJobBackups" + }, + { + "target": "com.amazonaws.backupsearch#ListSearchJobResults" + }, + { + "target": "com.amazonaws.backupsearch#ListTagsForResource" + }, + { + "target": "com.amazonaws.backupsearch#TagResource" + }, + { + "target": "com.amazonaws.backupsearch#UntagResource" + } + ], + "resources": [ + { + "target": "com.amazonaws.backupsearch#SearchJob" + }, + { + "target": "com.amazonaws.backupsearch#SearchResultExportJob" + } + ], + "errors": [ + { + "target": "com.amazonaws.backupsearch#AccessDeniedException" + }, + { + "target": "com.amazonaws.backupsearch#InternalServerException" + }, + { + "target": "com.amazonaws.backupsearch#ThrottlingException" + }, + { + "target": "com.amazonaws.backupsearch#ValidationException" + } + ], + "traits": { + "aws.api#service": { + "sdkId": "BackupSearch", + "arnNamespace": "backup-search", + "endpointPrefix": "backup-search", + "cloudTrailEventSource": "backup.amazonaws.com" + }, + "aws.auth#sigv4": { + "name": "backup-search" + }, + "aws.endpoints#dualStackOnlyEndpoints": {}, + "aws.endpoints#standardPartitionalEndpoints": { + "endpointPatternType": "service_region_dnsSuffix" + }, + "aws.protocols#restJson1": {}, + "smithy.api#cors": { + "additionalAllowedHeaders": [ + "*", + "Authorization", + "Date", + "X-Amz-Date", + "X-Amz-Security-Token", + "X-Amz-Target", + "content-type", + "x-amz-content-sha256", + "x-amz-user-agent", + "x-amzn-platform-id", + "x-amzn-trace-id" + ], + "additionalExposedHeaders": [ + "x-amzn-errortype", + "x-amzn-requestid", + "x-amzn-errormessage", + "x-amzn-trace-id", + "x-amzn-requestid", + "x-amz-apigw-id", + "date" + ] + }, + "smithy.api#documentation": "Backup Search\n

Backup Search is the recovery point and item level search for Backup.

\n

For additional information, see:

\n ", + "smithy.api#paginated": { + "inputToken": "NextToken", + "outputToken": "NextToken", + "pageSize": "MaxResults" + }, + "smithy.api#title": "AWS Backup Search", + "smithy.rules#endpointRuleSet": { + "version": "1.0", + "parameters": { + "UseFIPS": { + "builtIn": "AWS::UseFIPS", + "required": true, + "default": false, + "documentation": "When true, send this request to the FIPS-compliant regional endpoint. If the configured endpoint does not have a FIPS compliant endpoint, dispatching the request will return an error.", + "type": "Boolean" + }, + "Endpoint": { + "builtIn": "SDK::Endpoint", + "required": false, + "documentation": "Override the endpoint used to send this request", + "type": "String" + }, + "Region": { + "builtIn": "AWS::Region", + "required": false, + "documentation": "The AWS region used to dispatch the request.", + "type": "String" + } + }, + "rules": [ + { + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Endpoint" + } + ] + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "error": "Invalid Configuration: FIPS and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "endpoint": { + "url": { + "ref": "Endpoint" + }, + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + }, + { + "conditions": [], + "rules": [ + { + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Region" + } + ] + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "aws.partition", + "argv": [ + { + "ref": "Region" + } + ], + "assign": "PartitionResult" + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "endpoint": { + "url": "https://backup-search-fips.{PartitionResult#implicitGlobalRegion}.{PartitionResult#dualStackDnsSuffix}", + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": 
"{PartitionResult#implicitGlobalRegion}" + } + ] + }, + "headers": {} + }, + "type": "endpoint" + }, + { + "conditions": [], + "endpoint": { + "url": "https://backup-search.{PartitionResult#implicitGlobalRegion}.{PartitionResult#dualStackDnsSuffix}", + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "{PartitionResult#implicitGlobalRegion}" + } + ] + }, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + } + ], + "type": "tree" + }, + { + "conditions": [], + "error": "Invalid Configuration: Missing Region", + "type": "error" + } + ], + "type": "tree" + } + ] + }, + "smithy.rules#endpointTests": { + "testCases": [ + { + "documentation": "For custom endpoint with region not set and fips disabled", + "expect": { + "endpoint": { + "url": "https://example.com" + } + }, + "params": { + "Endpoint": "https://example.com", + "UseFIPS": false + } + }, + { + "documentation": "For custom endpoint with fips enabled", + "expect": { + "error": "Invalid Configuration: FIPS and custom endpoint are not supported" + }, + "params": { + "Endpoint": "https://example.com", + "UseFIPS": true + } + }, + { + "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "us-east-1" + } + ] + }, + "url": "https://backup-search-fips.us-east-1.api.aws" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": true + } + }, + { + "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "us-east-1" + } + ] + }, + "url": "https://backup-search.us-east-1.api.aws" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": false + } + }, + { + "documentation": "For region cn-northwest-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ 
+ { + "name": "sigv4", + "signingRegion": "cn-northwest-1" + } + ] + }, + "url": "https://backup-search-fips.cn-northwest-1.api.amazonwebservices.com.cn" + } + }, + "params": { + "Region": "cn-northwest-1", + "UseFIPS": true + } + }, + { + "documentation": "For region cn-northwest-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "cn-northwest-1" + } + ] + }, + "url": "https://backup-search.cn-northwest-1.api.amazonwebservices.com.cn" + } + }, + "params": { + "Region": "cn-northwest-1", + "UseFIPS": false + } + }, + { + "documentation": "For region us-gov-west-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "us-gov-west-1" + } + ] + }, + "url": "https://backup-search-fips.us-gov-west-1.api.aws" + } + }, + "params": { + "Region": "us-gov-west-1", + "UseFIPS": true + } + }, + { + "documentation": "For region us-gov-west-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "us-gov-west-1" + } + ] + }, + "url": "https://backup-search.us-gov-west-1.api.aws" + } + }, + "params": { + "Region": "us-gov-west-1", + "UseFIPS": false + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "us-iso-east-1" + } + ] + }, + "url": "https://backup-search-fips.us-iso-east-1.c2s.ic.gov" + } + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": true + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "us-iso-east-1" + } + ] + }, + "url": 
"https://backup-search.us-iso-east-1.c2s.ic.gov" + } + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": false + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "us-isob-east-1" + } + ] + }, + "url": "https://backup-search-fips.us-isob-east-1.sc2s.sgov.gov" + } + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": true + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "us-isob-east-1" + } + ] + }, + "url": "https://backup-search.us-isob-east-1.sc2s.sgov.gov" + } + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": false + } + }, + { + "documentation": "For region eu-isoe-west-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "eu-isoe-west-1" + } + ] + }, + "url": "https://backup-search-fips.eu-isoe-west-1.cloud.adc-e.uk" + } + }, + "params": { + "Region": "eu-isoe-west-1", + "UseFIPS": true + } + }, + { + "documentation": "For region eu-isoe-west-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "eu-isoe-west-1" + } + ] + }, + "url": "https://backup-search.eu-isoe-west-1.cloud.adc-e.uk" + } + }, + "params": { + "Region": "eu-isoe-west-1", + "UseFIPS": false + } + }, + { + "documentation": "For region us-isof-south-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "us-isof-south-1" + } + ] + }, + "url": "https://backup-search-fips.us-isof-south-1.csp.hci.ic.gov" + } + }, + "params": { + "Region": "us-isof-south-1", + 
"UseFIPS": true + } + }, + { + "documentation": "For region us-isof-south-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "us-isof-south-1" + } + ] + }, + "url": "https://backup-search.us-isof-south-1.csp.hci.ic.gov" + } + }, + "params": { + "Region": "us-isof-south-1", + "UseFIPS": false + } + }, + { + "documentation": "Missing region", + "expect": { + "error": "Invalid Configuration: Missing Region" + } + } + ], + "version": "1.0" + } + } + }, + "com.amazonaws.backupsearch#CurrentSearchProgress": { + "type": "structure", + "members": { + "RecoveryPointsScannedCount": { + "target": "smithy.api#Integer", + "traits": { + "smithy.api#documentation": "

This number is the sum of all backups that \n have been scanned so far during a search job.

" + } + }, + "ItemsScannedCount": { + "target": "smithy.api#Long", + "traits": { + "smithy.api#documentation": "

This number is the sum of all items that \n have been scanned so far during a search job.

" + } + }, + "ItemsMatchedCount": { + "target": "smithy.api#Long", + "traits": { + "smithy.api#documentation": "

This number is the sum of all items that match \n the item filters in a search job in progress.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

This contains information results retrieved from \n a search job that may not have completed.

" + } + }, + "com.amazonaws.backupsearch#EBSItemFilter": { + "type": "structure", + "members": { + "FilePaths": { + "target": "com.amazonaws.backupsearch#StringConditionList", + "traits": { + "smithy.api#documentation": "

You can include 1 to 10 values.

\n

If one file path is included, the results will \n return only items that match the file path.

\n

If more than one file path is included, the \n results will return all items that match any of the \n file paths.

" + } + }, + "Sizes": { + "target": "com.amazonaws.backupsearch#LongConditionList", + "traits": { + "smithy.api#documentation": "

You can include 1 to 10 values.

\n

If one is included, the results will \n return only items that match.

\n

If more than one is included, the \n results will return all items that match any of \n the included values.

" + } + }, + "CreationTimes": { + "target": "com.amazonaws.backupsearch#TimeConditionList", + "traits": { + "smithy.api#documentation": "

You can include 1 to 10 values.

\n

If one is included, the results will \n return only items that match.

\n

If more than one is included, the \n results will return all items that match any of \n the included values.

" + } + }, + "LastModificationTimes": { + "target": "com.amazonaws.backupsearch#TimeConditionList", + "traits": { + "smithy.api#documentation": "

You can include 1 to 10 values.

\n

If one is included, the results will \n return only items that match.

\n

If more than one is included, the \n results will return all items that match any of \n the included values.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

This contains arrays of objects, which may include \n CreationTimes time condition objects, FilePaths \n string objects, LastModificationTimes time \n condition objects, and Sizes long condition objects.

" + } + }, + "com.amazonaws.backupsearch#EBSItemFilters": { + "type": "list", + "member": { + "target": "com.amazonaws.backupsearch#EBSItemFilter" + }, + "traits": { + "smithy.api#length": { + "min": 0, + "max": 10 + } + } + }, + "com.amazonaws.backupsearch#EBSResultItem": { + "type": "structure", + "members": { + "BackupResourceArn": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

These are one or more items in the \n results that match values for the Amazon Resource \n Name (ARN) of recovery points returned in a search \n of Amazon EBS backup metadata.

" + } + }, + "SourceResourceArn": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

These are one or more items in the \n results that match values for the Amazon Resource \n Name (ARN) of source resources returned in a search \n of Amazon EBS backup metadata.

" + } + }, + "BackupVaultName": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The name of the backup vault.

" + } + }, + "FileSystemIdentifier": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

These are one or more items in the \n results that match values for file systems returned \n in a search of Amazon EBS backup metadata.

" + } + }, + "FilePath": { + "target": "com.amazonaws.backupsearch#FilePath", + "traits": { + "smithy.api#documentation": "

These are one or more items in the \n results that match values for file paths returned \n in a search of Amazon EBS backup metadata.

" + } + }, + "FileSize": { + "target": "smithy.api#Long", + "traits": { + "smithy.api#documentation": "

These are one or more items in the \n results that match values for file sizes returned \n in a search of Amazon EBS backup metadata.

" + } + }, + "CreationTime": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

These are one or more items in the \n results that match values for creation times returned \n in a search of Amazon EBS backup metadata.

" + } + }, + "LastModifiedTime": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

These are one or more items in the \n results that match values for Last Modified Time returned \n in a search of Amazon EBS backup metadata.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

These are the items returned in the results of \n a search of Amazon EBS backup metadata.

" + } + }, + "com.amazonaws.backupsearch#EncryptionKeyArn": { + "type": "string", + "traits": { + "aws.api#arnReference": { + "type": "AWS::KMS::Key" + } + } + }, + "com.amazonaws.backupsearch#ExportJobArn": { + "type": "string", + "traits": { + "aws.api#arnReference": { + "service": "com.amazonaws.backupsearch#CryoBackupSearchService" + } + } + }, + "com.amazonaws.backupsearch#ExportJobStatus": { + "type": "enum", + "members": { + "RUNNING": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "RUNNING" + } + }, + "FAILED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "FAILED" + } + }, + "COMPLETED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "COMPLETED" + } + } + } + }, + "com.amazonaws.backupsearch#ExportJobSummaries": { + "type": "list", + "member": { + "target": "com.amazonaws.backupsearch#ExportJobSummary" + } + }, + "com.amazonaws.backupsearch#ExportJobSummary": { + "type": "structure", + "members": { + "ExportJobIdentifier": { + "target": "com.amazonaws.backupsearch#GenericId", + "traits": { + "smithy.api#documentation": "

This is the unique string that identifies a \n specific export job.

", + "smithy.api#required": {} + } + }, + "ExportJobArn": { + "target": "com.amazonaws.backupsearch#ExportJobArn", + "traits": { + "smithy.api#documentation": "

This is the unique ARN (Amazon Resource Name) that \n belongs to the new export job.

" + } + }, + "Status": { + "target": "com.amazonaws.backupsearch#ExportJobStatus", + "traits": { + "smithy.api#documentation": "

The status of the export job is one of the \n following:

\n

\n CREATED; RUNNING; \n FAILED; or COMPLETED.

" + } + }, + "CreationTime": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

This is a timestamp of the time the export job \n was created.

" + } + }, + "CompletionTime": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

This is a timestamp of the time the export job \n completed.

" + } + }, + "StatusMessage": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

A status message is a string that is returned for an export\n job.

\n

A status message is included for any status other \n than COMPLETED without issues.

" + } + }, + "SearchJobArn": { + "target": "com.amazonaws.backupsearch#SearchJobArn", + "traits": { + "smithy.api#documentation": "

The unique string that identifies the Amazon Resource \n Name (ARN) of the specified search job.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

This is the summary of an export job.

" + } + }, + "com.amazonaws.backupsearch#ExportSpecification": { + "type": "union", + "members": { + "s3ExportSpecification": { + "target": "com.amazonaws.backupsearch#S3ExportSpecification", + "traits": { + "smithy.api#documentation": "

This specifies the destination Amazon S3 \n bucket for the export job. And, if included, it also \n specifies the destination prefix.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

This contains the export specification object.

" + } + }, + "com.amazonaws.backupsearch#FilePath": { + "type": "string", + "traits": { + "smithy.api#sensitive": {} + } + }, + "com.amazonaws.backupsearch#GenericId": { + "type": "string" + }, + "com.amazonaws.backupsearch#GetSearchJob": { + "type": "operation", + "input": { + "target": "com.amazonaws.backupsearch#GetSearchJobInput" + }, + "output": { + "target": "com.amazonaws.backupsearch#GetSearchJobOutput" + }, + "errors": [ + { + "target": "com.amazonaws.backupsearch#ResourceNotFoundException" + } + ], + "traits": { + "smithy.api#documentation": "

This operation retrieves metadata of a search job, \n including its progress.

", + "smithy.api#http": { + "code": 200, + "method": "GET", + "uri": "/search-jobs/{SearchJobIdentifier}" + }, + "smithy.api#readonly": {} + } + }, + "com.amazonaws.backupsearch#GetSearchJobInput": { + "type": "structure", + "members": { + "SearchJobIdentifier": { + "target": "com.amazonaws.backupsearch#GenericId", + "traits": { + "smithy.api#documentation": "

Required unique string that specifies the \n search job.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.backupsearch#GetSearchJobOutput": { + "type": "structure", + "members": { + "Name": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

Returned name of the specified search job.

" + } + }, + "SearchScopeSummary": { + "target": "com.amazonaws.backupsearch#SearchScopeSummary", + "traits": { + "smithy.api#documentation": "

Returned summary of the specified search job scope, \n including:\n

\n
    \n
  • \n

    TotalBackupsToScanCount, the number of \n recovery points returned by the search.

    \n
  • \n
  • \n

    TotalItemsToScanCount, the number of \n items returned by the search.

    \n
  • \n
" + } + }, + "CurrentSearchProgress": { + "target": "com.amazonaws.backupsearch#CurrentSearchProgress", + "traits": { + "smithy.api#documentation": "

Returns numbers representing RecoveryPointsScannedCount, \n ItemsScannedCount, and ItemsMatchedCount.

" + } + }, + "StatusMessage": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

A status message will be returned for either a \n search job with a status of ERRORED or a status of \n COMPLETED jobs with issues.

\n

For example, a message may say that a search \n contained recovery points unable to be scanned because \n of a permissions issue.

" + } + }, + "EncryptionKeyArn": { + "target": "com.amazonaws.backupsearch#EncryptionKeyArn", + "traits": { + "smithy.api#documentation": "

The encryption key for the specified \n search job.

\n

Example: arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab.

" + } + }, + "CompletionTime": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

The date and time that a search job completed, in Unix format and Coordinated\n Universal Time (UTC). The value of CompletionTime is accurate to milliseconds.\n For example, the value 1516925490.087 represents Friday, January 26, 2018 12:11:30.087\n AM.

" + } + }, + "Status": { + "target": "com.amazonaws.backupsearch#SearchJobState", + "traits": { + "smithy.api#documentation": "

The current status of the specified search job.

\n

A search job may have one of the following statuses: \n RUNNING; COMPLETED; STOPPED; \n FAILED; TIMED_OUT; or EXPIRED\n .

", + "smithy.api#required": {} + } + }, + "SearchScope": { + "target": "com.amazonaws.backupsearch#SearchScope", + "traits": { + "smithy.api#documentation": "

The search scope is all backup \n properties input into a search.

", + "smithy.api#required": {} + } + }, + "ItemFilters": { + "target": "com.amazonaws.backupsearch#ItemFilters", + "traits": { + "smithy.api#documentation": "

Item Filters represent all input item \n properties specified when the search was \n created.

", + "smithy.api#required": {} + } + }, + "CreationTime": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

The date and time that a search job was created, in Unix format and Coordinated\n Universal Time (UTC). The value of CreationTime is accurate to milliseconds.\n For example, the value 1516925490.087 represents Friday, January 26, 2018 12:11:30.087\n AM.

", + "smithy.api#required": {} + } + }, + "SearchJobIdentifier": { + "target": "com.amazonaws.backupsearch#GenericId", + "traits": { + "smithy.api#documentation": "

The unique string that identifies the specified search job.

", + "smithy.api#required": {} + } + }, + "SearchJobArn": { + "target": "com.amazonaws.backupsearch#SearchJobArn", + "traits": { + "smithy.api#documentation": "

The unique string that identifies the Amazon Resource \n Name (ARN) of the specified search job.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.backupsearch#GetSearchResultExportJob": { + "type": "operation", + "input": { + "target": "com.amazonaws.backupsearch#GetSearchResultExportJobInput" + }, + "output": { + "target": "com.amazonaws.backupsearch#GetSearchResultExportJobOutput" + }, + "errors": [ + { + "target": "com.amazonaws.backupsearch#ResourceNotFoundException" + } + ], + "traits": { + "smithy.api#documentation": "

This operation retrieves the metadata of an export job.

\n

An export job is an operation that transmits the results \n of a search job to a specified S3 bucket in a \n .csv file.

\n

An export job allows you to retain results of a search \n beyond the search job's scheduled retention of 7 days.

", + "smithy.api#http": { + "code": 200, + "method": "GET", + "uri": "/export-search-jobs/{ExportJobIdentifier}" + }, + "smithy.api#readonly": {} + } + }, + "com.amazonaws.backupsearch#GetSearchResultExportJobInput": { + "type": "structure", + "members": { + "ExportJobIdentifier": { + "target": "com.amazonaws.backupsearch#GenericId", + "traits": { + "smithy.api#documentation": "

This is the unique string that identifies a \n specific export job.

\n

Required for this operation.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.backupsearch#GetSearchResultExportJobOutput": { + "type": "structure", + "members": { + "ExportJobIdentifier": { + "target": "com.amazonaws.backupsearch#GenericId", + "traits": { + "smithy.api#documentation": "

This is the unique string that identifies the \n specified export job.

", + "smithy.api#required": {} + } + }, + "ExportJobArn": { + "target": "com.amazonaws.backupsearch#ExportJobArn", + "traits": { + "smithy.api#documentation": "

The unique Amazon Resource Name (ARN) that uniquely identifies \n the export job.

" + } + }, + "Status": { + "target": "com.amazonaws.backupsearch#ExportJobStatus", + "traits": { + "smithy.api#documentation": "

This is the current status of the export job.

" + } + }, + "CreationTime": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

The date and time that an export job was created, in Unix format and Coordinated Universal\n Time (UTC). The value of CreationTime is accurate to milliseconds. For\n example, the value 1516925490.087 represents Friday, January 26, 2018 12:11:30.087\n AM.

" + } + }, + "CompletionTime": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

The date and time that an export job completed, in Unix format and Coordinated Universal\n Time (UTC). The value of CompletionTime is accurate to milliseconds. For\n example, the value 1516925490.087 represents Friday, January 26, 2018 12:11:30.087\n AM.

" + } + }, + "StatusMessage": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

A status message is a string that is returned for a search job \n with a status of FAILED, along with steps to remedy \n and retry the operation.

" + } + }, + "ExportSpecification": { + "target": "com.amazonaws.backupsearch#ExportSpecification", + "traits": { + "smithy.api#documentation": "

The export specification consists of the destination \n S3 bucket to which the search results were exported, along \n with the destination prefix.

" + } + }, + "SearchJobArn": { + "target": "com.amazonaws.backupsearch#SearchJobArn", + "traits": { + "smithy.api#documentation": "

The unique string that identifies the Amazon Resource \n Name (ARN) of the specified search job.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.backupsearch#IamRoleArn": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 20, + "max": 2048 + }, + "smithy.api#pattern": "^arn:(?:aws|aws-cn|aws-us-gov):iam::[a-z0-9-]+:role/(.+)$" + } + }, + "com.amazonaws.backupsearch#InternalServerException": { + "type": "structure", + "members": { + "message": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

Unexpected error during processing of request.

", + "smithy.api#required": {} + } + }, + "retryAfterSeconds": { + "target": "smithy.api#Integer", + "traits": { + "smithy.api#documentation": "

Retry the call after number of seconds.

", + "smithy.api#httpHeader": "Retry-After" + } + } + }, + "traits": { + "smithy.api#documentation": "

An internal server error occurred. Retry your request.

", + "smithy.api#error": "server", + "smithy.api#httpError": 500, + "smithy.api#retryable": {} + } + }, + "com.amazonaws.backupsearch#ItemFilters": { + "type": "structure", + "members": { + "S3ItemFilters": { + "target": "com.amazonaws.backupsearch#S3ItemFilters", + "traits": { + "smithy.api#documentation": "

This array can contain CreationTimes, ETags, \n ObjectKeys, Sizes, or VersionIds objects.

" + } + }, + "EBSItemFilters": { + "target": "com.amazonaws.backupsearch#EBSItemFilters", + "traits": { + "smithy.api#documentation": "

This array can contain CreationTimes, \n FilePaths, LastModificationTimes, or Sizes objects.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Item Filters represent all input item \n properties specified when the search was \n created.

\n

Contains either EBSItemFilters or \n S3ItemFilters

" + } + }, + "com.amazonaws.backupsearch#ListSearchJobBackups": { + "type": "operation", + "input": { + "target": "com.amazonaws.backupsearch#ListSearchJobBackupsInput" + }, + "output": { + "target": "com.amazonaws.backupsearch#ListSearchJobBackupsOutput" + }, + "errors": [ + { + "target": "com.amazonaws.backupsearch#ResourceNotFoundException" + } + ], + "traits": { + "smithy.api#documentation": "

This operation returns a list of all backups (recovery \n points) in a paginated format that were included in \n the search job.

\n

If a search does not display an expected backup in \n the results, you can call this operation to display each \n backup included in the search. Any backups that were not \n included because they have a FAILED status \n from a permissions issue will be displayed, along with a \n status message.

\n

Only recovery points with a backup index that has \n a status of ACTIVE will be included in search results. \n If the index has any other status, its status will be \n displayed along with a status message.

", + "smithy.api#http": { + "code": 200, + "method": "GET", + "uri": "/search-jobs/{SearchJobIdentifier}/backups" + }, + "smithy.api#paginated": { + "inputToken": "NextToken", + "outputToken": "NextToken", + "pageSize": "MaxResults", + "items": "Results" + }, + "smithy.api#readonly": {} + } + }, + "com.amazonaws.backupsearch#ListSearchJobBackupsInput": { + "type": "structure", + "members": { + "SearchJobIdentifier": { + "target": "com.amazonaws.backupsearch#GenericId", + "traits": { + "smithy.api#documentation": "

The unique string that specifies the search job.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "NextToken": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The next item following a partial list of returned backups \n included in a search job.

\n

For example, if a request\n is made to return MaxResults number of backups, NextToken\n allows you to return more items in your list starting at the location pointed to by the\n next token.

", + "smithy.api#httpQuery": "nextToken" + } + }, + "MaxResults": { + "target": "smithy.api#Integer", + "traits": { + "smithy.api#default": 1000, + "smithy.api#documentation": "

The maximum number of resource list items to be returned.

", + "smithy.api#httpQuery": "maxResults", + "smithy.api#range": { + "min": 1, + "max": 1000 + } + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.backupsearch#ListSearchJobBackupsOutput": { + "type": "structure", + "members": { + "Results": { + "target": "com.amazonaws.backupsearch#SearchJobBackupsResults", + "traits": { + "smithy.api#documentation": "

The recovery points returned in the results of a \n search job

", + "smithy.api#required": {} + } + }, + "NextToken": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The next item following a partial list of returned backups \n included in a search job.

\n

For example, if a request\n is made to return MaxResults number of backups, NextToken\n allows you to return more items in your list starting at the location pointed to by the\n next token.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.backupsearch#ListSearchJobResults": { + "type": "operation", + "input": { + "target": "com.amazonaws.backupsearch#ListSearchJobResultsInput" + }, + "output": { + "target": "com.amazonaws.backupsearch#ListSearchJobResultsOutput" + }, + "errors": [ + { + "target": "com.amazonaws.backupsearch#ResourceNotFoundException" + } + ], + "traits": { + "aws.api#dataPlane": {}, + "smithy.api#documentation": "

This operation returns a list of the results of a specified search job.

", + "smithy.api#http": { + "code": 200, + "method": "GET", + "uri": "/search-jobs/{SearchJobIdentifier}/search-results" + }, + "smithy.api#paginated": { + "items": "Results" + }, + "smithy.api#readonly": {} + } + }, + "com.amazonaws.backupsearch#ListSearchJobResultsInput": { + "type": "structure", + "members": { + "SearchJobIdentifier": { + "target": "com.amazonaws.backupsearch#GenericId", + "traits": { + "smithy.api#documentation": "

The unique string that specifies the search job.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "NextToken": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The next item following a partial list of returned \n search job results.

\n

For example, if a request\n is made to return MaxResults number of \n search job results, NextToken\n allows you to return more items in your list starting at the location pointed to by the\n next token.

", + "smithy.api#httpQuery": "nextToken" + } + }, + "MaxResults": { + "target": "smithy.api#Integer", + "traits": { + "smithy.api#default": 1000, + "smithy.api#documentation": "

The maximum number of resource list items to be returned.

", + "smithy.api#httpQuery": "maxResults", + "smithy.api#range": { + "min": 1, + "max": 1000 + } + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.backupsearch#ListSearchJobResultsOutput": { + "type": "structure", + "members": { + "Results": { + "target": "com.amazonaws.backupsearch#Results", + "traits": { + "smithy.api#documentation": "

The results consist of either EBSResultItem or S3ResultItem.

", + "smithy.api#required": {} + } + }, + "NextToken": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The next item following a partial list of \n search job results.

\n

For example, if a request\n is made to return MaxResults number of backups, NextToken\n allows you to return more items in your list starting at the location pointed to by the\n next token.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.backupsearch#ListSearchJobs": { + "type": "operation", + "input": { + "target": "com.amazonaws.backupsearch#ListSearchJobsInput" + }, + "output": { + "target": "com.amazonaws.backupsearch#ListSearchJobsOutput" + }, + "traits": { + "smithy.api#documentation": "

This operation returns a list of search jobs belonging \n to an account.

", + "smithy.api#http": { + "code": 200, + "method": "GET", + "uri": "/search-jobs" + }, + "smithy.api#paginated": { + "items": "SearchJobs" + }, + "smithy.api#readonly": {}, + "smithy.test#smokeTests": [ + { + "id": "ListSearchJobsSuccess", + "params": {}, + "expect": { + "success": {} + }, + "vendorParamsShape": "aws.test#AwsVendorParams", + "vendorParams": { + "region": "us-east-1" + } + } + ] + } + }, + "com.amazonaws.backupsearch#ListSearchJobsInput": { + "type": "structure", + "members": { + "ByStatus": { + "target": "com.amazonaws.backupsearch#SearchJobState", + "traits": { + "smithy.api#documentation": "

Include this parameter to filter list by search \n job status.

", + "smithy.api#httpQuery": "Status", + "smithy.api#notProperty": {} + } + }, + "NextToken": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The next item following a partial list of returned \n search jobs.

\n

For example, if a request\n is made to return MaxResults number of backups, NextToken\n allows you to return more items in your list starting at the location pointed to by the\n next token.

", + "smithy.api#httpQuery": "NextToken", + "smithy.api#notProperty": {} + } + }, + "MaxResults": { + "target": "smithy.api#Integer", + "traits": { + "smithy.api#default": 1000, + "smithy.api#documentation": "

The maximum number of resource list items to be returned.

", + "smithy.api#httpQuery": "MaxResults", + "smithy.api#notProperty": {}, + "smithy.api#range": { + "min": 1, + "max": 1000 + } + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.backupsearch#ListSearchJobsOutput": { + "type": "structure", + "members": { + "SearchJobs": { + "target": "com.amazonaws.backupsearch#SearchJobs", + "traits": { + "smithy.api#documentation": "

The search jobs among the list, with details of \n the returned search jobs.

", + "smithy.api#notProperty": {}, + "smithy.api#required": {} + } + }, + "NextToken": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The next item following a partial list of returned backups \n included in a search job.

\n

For example, if a request\n is made to return MaxResults number of backups, NextToken\n allows you to return more items in your list starting at the location pointed to by the\n next token.

", + "smithy.api#notProperty": {} + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.backupsearch#ListSearchResultExportJobs": { + "type": "operation", + "input": { + "target": "com.amazonaws.backupsearch#ListSearchResultExportJobsInput" + }, + "output": { + "target": "com.amazonaws.backupsearch#ListSearchResultExportJobsOutput" + }, + "errors": [ + { + "target": "com.amazonaws.backupsearch#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.backupsearch#ServiceQuotaExceededException" + } + ], + "traits": { + "smithy.api#documentation": "

This operation returns a list of export jobs that contain the \n results of a search job, including the destination S3 bucket to \n which results were exported.

", + "smithy.api#http": { + "code": 200, + "method": "GET", + "uri": "/export-search-jobs" + }, + "smithy.api#paginated": { + "items": "ExportJobs" + }, + "smithy.api#readonly": {} + } + }, + "com.amazonaws.backupsearch#ListSearchResultExportJobsInput": { + "type": "structure", + "members": { + "Status": { + "target": "com.amazonaws.backupsearch#ExportJobStatus", + "traits": { + "smithy.api#documentation": "

The search jobs to be included in the export job \n can be filtered by including this parameter.

", + "smithy.api#httpQuery": "Status", + "smithy.api#notProperty": {} + } + }, + "SearchJobIdentifier": { + "target": "com.amazonaws.backupsearch#GenericId", + "traits": { + "smithy.api#documentation": "

The unique string that specifies the search job.

", + "smithy.api#httpQuery": "SearchJobIdentifier", + "smithy.api#notProperty": {} + } + }, + "NextToken": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The next item following a partial list of returned backups \n included in a search job.

\n

For example, if a request\n is made to return MaxResults number of backups, NextToken\n allows you to return more items in your list starting at the location pointed to by the\n next token.

", + "smithy.api#httpQuery": "NextToken", + "smithy.api#notProperty": {} + } + }, + "MaxResults": { + "target": "smithy.api#Integer", + "traits": { + "smithy.api#default": 1000, + "smithy.api#documentation": "

The maximum number of resource list items to be returned.

", + "smithy.api#httpQuery": "MaxResults", + "smithy.api#notProperty": {}, + "smithy.api#range": { + "min": 1, + "max": 1000 + } + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.backupsearch#ListSearchResultExportJobsOutput": { + "type": "structure", + "members": { + "ExportJobs": { + "target": "com.amazonaws.backupsearch#ExportJobSummaries", + "traits": { + "smithy.api#documentation": "

The operation returns the included export jobs.

", + "smithy.api#notProperty": {}, + "smithy.api#required": {} + } + }, + "NextToken": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The next item following a partial list of returned backups \n included in a search job.

\n

For example, if a request\n is made to return MaxResults number of backups, NextToken\n allows you to return more items in your list starting at the location pointed to by the\n next token.

", + "smithy.api#notProperty": {} + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.backupsearch#ListTagsForResource": { + "type": "operation", + "input": { + "target": "com.amazonaws.backupsearch#ListTagsForResourceRequest" + }, + "output": { + "target": "com.amazonaws.backupsearch#ListTagsForResourceResponse" + }, + "errors": [ + { + "target": "com.amazonaws.backupsearch#ResourceNotFoundException" + } + ], + "traits": { + "smithy.api#documentation": "

This operation returns the tags for a resource type.

", + "smithy.api#http": { + "uri": "/tags/{ResourceArn}", + "method": "GET" + }, + "smithy.api#readonly": {} + } + }, + "com.amazonaws.backupsearch#ListTagsForResourceRequest": { + "type": "structure", + "members": { + "ResourceArn": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) that uniquely identifies \n the resource.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.backupsearch#ListTagsForResourceResponse": { + "type": "structure", + "members": { + "Tags": { + "target": "com.amazonaws.backupsearch#TagMap", + "traits": { + "smithy.api#documentation": "

List of tags returned by the operation.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.backupsearch#LongCondition": { + "type": "structure", + "members": { + "Value": { + "target": "smithy.api#Long", + "traits": { + "smithy.api#documentation": "

The value of an item included in one of the search \n item filters.

", + "smithy.api#required": {} + } + }, + "Operator": { + "target": "com.amazonaws.backupsearch#LongConditionOperator", + "traits": { + "smithy.api#default": "EQUALS_TO", + "smithy.api#documentation": "

A string that defines what values will be \n returned.

\n

If this is included, avoid combinations of \n operators that will return all possible values. \n For example, including both EQUALS_TO \n and NOT_EQUALS_TO with a value of 4 \n will return all values.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The long condition contains a Value \n and can optionally contain an Operator.

" + } + }, + "com.amazonaws.backupsearch#LongConditionList": { + "type": "list", + "member": { + "target": "com.amazonaws.backupsearch#LongCondition" + }, + "traits": { + "smithy.api#length": { + "min": 1, + "max": 10 + } + } + }, + "com.amazonaws.backupsearch#LongConditionOperator": { + "type": "enum", + "members": { + "EQUALS_TO": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "EQUALS_TO" + } + }, + "NOT_EQUALS_TO": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "NOT_EQUALS_TO" + } + }, + "LESS_THAN_EQUAL_TO": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "LESS_THAN_EQUAL_TO" + } + }, + "GREATER_THAN_EQUAL_TO": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "GREATER_THAN_EQUAL_TO" + } + } + } + }, + "com.amazonaws.backupsearch#ObjectKey": { + "type": "string", + "traits": { + "smithy.api#sensitive": {} + } + }, + "com.amazonaws.backupsearch#RecoveryPoint": { + "type": "string", + "traits": { + "aws.api#arnReference": { + "type": "AWS::Backup::RecoveryPoint" + } + } + }, + "com.amazonaws.backupsearch#RecoveryPointArnList": { + "type": "list", + "member": { + "target": "com.amazonaws.backupsearch#RecoveryPoint" + }, + "traits": { + "smithy.api#length": { + "min": 0, + "max": 50 + } + } + }, + "com.amazonaws.backupsearch#ResourceArnList": { + "type": "list", + "member": { + "target": "smithy.api#String" + }, + "traits": { + "smithy.api#length": { + "min": 0, + "max": 50 + } + } + }, + "com.amazonaws.backupsearch#ResourceNotFoundException": { + "type": "structure", + "members": { + "message": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

Request references a resource which does not exist.

", + "smithy.api#required": {} + } + }, + "resourceId": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

Hypothetical identifier of the resource affected.

", + "smithy.api#required": {} + } + }, + "resourceType": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

Hypothetical type of the resource affected.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

The resource was not found for this request.

\n

Confirm the resource information, such as the ARN or type is correct \n and exists, then retry the request.

", + "smithy.api#error": "client", + "smithy.api#httpError": 404 + } + }, + "com.amazonaws.backupsearch#ResourceType": { + "type": "enum", + "members": { + "S3": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "S3" + } + }, + "EBS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "EBS" + } + } + } + }, + "com.amazonaws.backupsearch#ResourceTypeList": { + "type": "list", + "member": { + "target": "com.amazonaws.backupsearch#ResourceType" + }, + "traits": { + "smithy.api#length": { + "min": 1, + "max": 1 + } + } + }, + "com.amazonaws.backupsearch#ResultItem": { + "type": "union", + "members": { + "S3ResultItem": { + "target": "com.amazonaws.backupsearch#S3ResultItem", + "traits": { + "smithy.api#documentation": "

These are items returned in the search results \n of an Amazon S3 search.

" + } + }, + "EBSResultItem": { + "target": "com.amazonaws.backupsearch#EBSResultItem", + "traits": { + "smithy.api#documentation": "

These are items returned in the search results \n of an Amazon EBS search.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

This is an object representing the item \n returned in the results of a search for a specific \n resource type.

" + } + }, + "com.amazonaws.backupsearch#Results": { + "type": "list", + "member": { + "target": "com.amazonaws.backupsearch#ResultItem" + } + }, + "com.amazonaws.backupsearch#S3ExportSpecification": { + "type": "structure", + "members": { + "DestinationBucket": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

This specifies the destination Amazon S3 \n bucket for the export job.

", + "smithy.api#required": {} + } + }, + "DestinationPrefix": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

This specifies the prefix for the destination \n Amazon S3 bucket for the export job.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

This specification contains a required string of the \n destination bucket; optionally, you can include the \n destination prefix.

" + } + }, + "com.amazonaws.backupsearch#S3ItemFilter": { + "type": "structure", + "members": { + "ObjectKeys": { + "target": "com.amazonaws.backupsearch#StringConditionList", + "traits": { + "smithy.api#documentation": "

You can include 1 to 10 values.

\n

If one value is included, the results will \n return only items that match the value.

\n

If more than one value is included, the \n results will return all items that match any of the \n values.

" + } + }, + "Sizes": { + "target": "com.amazonaws.backupsearch#LongConditionList", + "traits": { + "smithy.api#documentation": "

You can include 1 to 10 values.

\n

If one value is included, the results will \n return only items that match the value.

\n

If more than one value is included, the \n results will return all items that match any of the \n values.

" + } + }, + "CreationTimes": { + "target": "com.amazonaws.backupsearch#TimeConditionList", + "traits": { + "smithy.api#documentation": "

You can include 1 to 10 values.

\n

If one value is included, the results will \n return only items that match the value.

\n

If more than one value is included, the \n results will return all items that match any of the \n values.

" + } + }, + "VersionIds": { + "target": "com.amazonaws.backupsearch#StringConditionList", + "traits": { + "smithy.api#documentation": "

You can include 1 to 10 values.

\n

If one value is included, the results will \n return only items that match the value.

\n

If more than one value is included, the \n results will return all items that match any of the \n values.

" + } + }, + "ETags": { + "target": "com.amazonaws.backupsearch#StringConditionList", + "traits": { + "smithy.api#documentation": "

You can include 1 to 10 values.

\n

If one value is included, the results will \n return only items that match the value.

\n

If more than one value is included, the \n results will return all items that match any of the \n values.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

This contains arrays of objects, which may include \n ObjectKeys, Sizes, CreationTimes, VersionIds, and/or \n Etags.

" + } + }, + "com.amazonaws.backupsearch#S3ItemFilters": { + "type": "list", + "member": { + "target": "com.amazonaws.backupsearch#S3ItemFilter" + }, + "traits": { + "smithy.api#length": { + "min": 0, + "max": 10 + } + } + }, + "com.amazonaws.backupsearch#S3ResultItem": { + "type": "structure", + "members": { + "BackupResourceArn": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

These are items in the returned results that match \n recovery point Amazon Resource Names (ARN) input during \n a search of Amazon S3 backup metadata.

" + } + }, + "SourceResourceArn": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

These are items in the returned results that match \n source Amazon Resource Names (ARN) input during \n a search of Amazon S3 backup metadata.

" + } + }, + "BackupVaultName": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The name of the backup vault.

" + } + }, + "ObjectKey": { + "target": "com.amazonaws.backupsearch#ObjectKey", + "traits": { + "smithy.api#documentation": "

This is one or more items \n returned in the results of a search of Amazon S3 \n backup metadata that match the values input for \n object key.

" + } + }, + "ObjectSize": { + "target": "smithy.api#Long", + "traits": { + "smithy.api#documentation": "

These are items in the returned results that match \n values for object size(s) input during a search of \n Amazon S3 backup metadata.

" + } + }, + "CreationTime": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

These are one or more items in the returned results \n that match values for item creation time input during \n a search of Amazon S3 backup metadata.

" + } + }, + "ETag": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

These are one or more items in the returned results \n that match values for ETags input during \n a search of Amazon S3 backup metadata.

" + } + }, + "VersionId": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

These are one or more items in the returned results \n that match values for version IDs input during \n a search of Amazon S3 backup metadata.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

These are the items returned in the results of \n a search of Amazon S3 backup metadata.

" + } + }, + "com.amazonaws.backupsearch#SearchJob": { + "type": "resource", + "identifiers": { + "SearchJobIdentifier": { + "target": "com.amazonaws.backupsearch#GenericId" + } + }, + "properties": { + "Status": { + "target": "com.amazonaws.backupsearch#SearchJobState" + }, + "Name": { + "target": "smithy.api#String" + }, + "EncryptionKeyArn": { + "target": "com.amazonaws.backupsearch#EncryptionKeyArn" + }, + "SearchScope": { + "target": "com.amazonaws.backupsearch#SearchScope" + }, + "ItemFilters": { + "target": "com.amazonaws.backupsearch#ItemFilters" + }, + "CreationTime": { + "target": "smithy.api#Timestamp" + }, + "CompletionTime": { + "target": "smithy.api#Timestamp" + }, + "SearchScopeSummary": { + "target": "com.amazonaws.backupsearch#SearchScopeSummary" + }, + "CurrentSearchProgress": { + "target": "com.amazonaws.backupsearch#CurrentSearchProgress" + }, + "StatusMessage": { + "target": "smithy.api#String" + }, + "ClientToken": { + "target": "smithy.api#String" + }, + "Tags": { + "target": "com.amazonaws.backupsearch#TagMap" + }, + "SearchJobArn": { + "target": "com.amazonaws.backupsearch#SearchJobArn" + } + }, + "create": { + "target": "com.amazonaws.backupsearch#StartSearchJob" + }, + "read": { + "target": "com.amazonaws.backupsearch#GetSearchJob" + }, + "update": { + "target": "com.amazonaws.backupsearch#StopSearchJob" + }, + "list": { + "target": "com.amazonaws.backupsearch#ListSearchJobs" + } + }, + "com.amazonaws.backupsearch#SearchJobArn": { + "type": "string", + "traits": { + "aws.api#arnReference": { + "service": "com.amazonaws.backupsearch#CryoBackupSearchService" + } + } + }, + "com.amazonaws.backupsearch#SearchJobBackupsResult": { + "type": "structure", + "members": { + "Status": { + "target": "com.amazonaws.backupsearch#SearchJobState", + "traits": { + "smithy.api#documentation": "

This is the status of the search job backup result.

" + } + }, + "StatusMessage": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

This is the status message included with the results.

" + } + }, + "ResourceType": { + "target": "com.amazonaws.backupsearch#ResourceType", + "traits": { + "smithy.api#documentation": "

This is the resource type of the search.

" + } + }, + "BackupResourceArn": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) that uniquely identifies \n the backup resources.

" + } + }, + "SourceResourceArn": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) that uniquely identifies \n the source resources.

" + } + }, + "IndexCreationTime": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

This is the creation time of the backup index.

" + } + }, + "BackupCreationTime": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

This is the creation time of the backup (recovery point).

" + } + } + }, + "traits": { + "smithy.api#documentation": "

This contains the information about recovery \n points returned in results of a search job.

" + } + }, + "com.amazonaws.backupsearch#SearchJobBackupsResults": { + "type": "list", + "member": { + "target": "com.amazonaws.backupsearch#SearchJobBackupsResult" + } + }, + "com.amazonaws.backupsearch#SearchJobState": { + "type": "enum", + "members": { + "RUNNING": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "RUNNING" + } + }, + "COMPLETED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "COMPLETED" + } + }, + "STOPPING": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "STOPPING" + } + }, + "STOPPED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "STOPPED" + } + }, + "FAILED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "FAILED" + } + } + } + }, + "com.amazonaws.backupsearch#SearchJobSummary": { + "type": "structure", + "members": { + "SearchJobIdentifier": { + "target": "com.amazonaws.backupsearch#GenericId", + "traits": { + "smithy.api#documentation": "

The unique string that specifies the search job.

" + } + }, + "SearchJobArn": { + "target": "com.amazonaws.backupsearch#SearchJobArn", + "traits": { + "smithy.api#documentation": "

The unique string that identifies the Amazon Resource \n Name (ARN) of the specified search job.

" + } + }, + "Name": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

This is the name of the search job.

" + } + }, + "Status": { + "target": "com.amazonaws.backupsearch#SearchJobState", + "traits": { + "smithy.api#documentation": "

This is the status of the search job.

" + } + }, + "CreationTime": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

This is the creation time of the search job.

" + } + }, + "CompletionTime": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

This is the completion time of the search job.

" + } + }, + "SearchScopeSummary": { + "target": "com.amazonaws.backupsearch#SearchScopeSummary", + "traits": { + "smithy.api#documentation": "

Returned summary of the specified search job scope, \n including:\n

\n
    \n
  • \n

    TotalBackupsToScanCount, the number of \n recovery points returned by the search.

    \n
  • \n
  • \n

    TotalItemsToScanCount, the number of \n items returned by the search.

    \n
  • \n
" + } + }, + "StatusMessage": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

A status message will be returned for either a \n search job with a status of ERRORED or a status of \n COMPLETED jobs with issues.

\n

For example, a message may say that a search \n contained recovery points unable to be scanned because \n of a permissions issue.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

This is information pertaining to a search job.

" + } + }, + "com.amazonaws.backupsearch#SearchJobs": { + "type": "list", + "member": { + "target": "com.amazonaws.backupsearch#SearchJobSummary" + } + }, + "com.amazonaws.backupsearch#SearchResultExportJob": { + "type": "resource", + "identifiers": { + "ExportJobIdentifier": { + "target": "com.amazonaws.backupsearch#GenericId" + } + }, + "properties": { + "ExportJobArn": { + "target": "com.amazonaws.backupsearch#ExportJobArn" + }, + "SearchJobIdentifier": { + "target": "com.amazonaws.backupsearch#GenericId" + }, + "SearchJobArn": { + "target": "com.amazonaws.backupsearch#SearchJobArn" + }, + "Status": { + "target": "com.amazonaws.backupsearch#ExportJobStatus" + }, + "StatusMessage": { + "target": "smithy.api#String" + }, + "CreationTime": { + "target": "smithy.api#Timestamp" + }, + "CompletionTime": { + "target": "smithy.api#Timestamp" + }, + "ExportSpecification": { + "target": "com.amazonaws.backupsearch#ExportSpecification" + }, + "ClientToken": { + "target": "smithy.api#String" + }, + "Tags": { + "target": "com.amazonaws.backupsearch#TagMap" + }, + "RoleArn": { + "target": "com.amazonaws.backupsearch#IamRoleArn" + } + }, + "create": { + "target": "com.amazonaws.backupsearch#StartSearchResultExportJob" + }, + "read": { + "target": "com.amazonaws.backupsearch#GetSearchResultExportJob" + }, + "list": { + "target": "com.amazonaws.backupsearch#ListSearchResultExportJobs" + } + }, + "com.amazonaws.backupsearch#SearchScope": { + "type": "structure", + "members": { + "BackupResourceTypes": { + "target": "com.amazonaws.backupsearch#ResourceTypeList", + "traits": { + "smithy.api#documentation": "

The resource types included in a search.

\n

Eligible resource types include S3 and EBS.

", + "smithy.api#required": {} + } + }, + "BackupResourceCreationTime": { + "target": "com.amazonaws.backupsearch#BackupCreationTimeFilter", + "traits": { + "smithy.api#documentation": "

This is the time a backup resource was created.

" + } + }, + "SourceResourceArns": { + "target": "com.amazonaws.backupsearch#ResourceArnList", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) that uniquely identifies \n the source resources.

" + } + }, + "BackupResourceArns": { + "target": "com.amazonaws.backupsearch#RecoveryPointArnList", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) that uniquely identifies \n the backup resources.

" + } + }, + "BackupResourceTags": { + "target": "com.amazonaws.backupsearch#TagMap", + "traits": { + "smithy.api#documentation": "

These are one or more tags on the backup (recovery \n point).

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The search scope is all backup \n properties input into a search.

" + } + }, + "com.amazonaws.backupsearch#SearchScopeSummary": { + "type": "structure", + "members": { + "TotalRecoveryPointsToScanCount": { + "target": "smithy.api#Integer", + "traits": { + "smithy.api#documentation": "

This is the count of the total number of backups \n that will be scanned in a search.

" + } + }, + "TotalItemsToScanCount": { + "target": "smithy.api#Long", + "traits": { + "smithy.api#documentation": "

This is the count of the total number of items \n that will be scanned in a search.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The summary of the specified search job scope, \n including:\n

\n
    \n
  • \n

    TotalRecoveryPointsToScanCount, the number of \n recovery points returned by the search.

    \n
  • \n
  • \n

    TotalItemsToScanCount, the number of \n items returned by the search.

    \n
  • \n
" + } + }, + "com.amazonaws.backupsearch#ServiceQuotaExceededException": { + "type": "structure", + "members": { + "message": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

This request was not successful due to a service quota exceeding limits.

", + "smithy.api#required": {} + } + }, + "resourceId": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

Identifier of the resource.

", + "smithy.api#required": {} + } + }, + "resourceType": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

Type of resource.

", + "smithy.api#required": {} + } + }, + "serviceCode": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

This is the code unique to the originating service with the quota.

", + "smithy.api#required": {} + } + }, + "quotaCode": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

This is the code specific to the quota type.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

The request was denied due to exceeding the permitted quota limits.

", + "smithy.api#error": "client", + "smithy.api#httpError": 402 + } + }, + "com.amazonaws.backupsearch#StartSearchJob": { + "type": "operation", + "input": { + "target": "com.amazonaws.backupsearch#StartSearchJobInput" + }, + "output": { + "target": "com.amazonaws.backupsearch#StartSearchJobOutput" + }, + "errors": [ + { + "target": "com.amazonaws.backupsearch#ConflictException" + }, + { + "target": "com.amazonaws.backupsearch#ServiceQuotaExceededException" + } + ], + "traits": { + "smithy.api#documentation": "

This operation creates a search job which returns \n recovery points filtered by SearchScope and items \n filtered by ItemFilters.

\n

You can optionally include ClientToken, \n EncryptionKeyArn, Name, and/or Tags.

", + "smithy.api#http": { + "code": 200, + "method": "PUT", + "uri": "/search-jobs" + }, + "smithy.api#idempotent": {} + } + }, + "com.amazonaws.backupsearch#StartSearchJobInput": { + "type": "structure", + "members": { + "Tags": { + "target": "com.amazonaws.backupsearch#TagMap", + "traits": { + "smithy.api#documentation": "

List of tags to apply to the search job.

" + } + }, + "Name": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

Include alphanumeric characters to create a \n name for this search job.

", + "smithy.api#length": { + "max": 500 + } + } + }, + "EncryptionKeyArn": { + "target": "com.amazonaws.backupsearch#EncryptionKeyArn", + "traits": { + "smithy.api#documentation": "

The encryption key for the specified \n search job.

" + } + }, + "ClientToken": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

Include this parameter to allow multiple identical \n calls for idempotency.

\n

A client token is valid for 8 hours after the first \n request that uses it is completed. After this time,\n any request with the same token is treated as a \n new request.

" + } + }, + "SearchScope": { + "target": "com.amazonaws.backupsearch#SearchScope", + "traits": { + "smithy.api#documentation": "

This object can contain BackupResourceTypes, \n BackupResourceArns, BackupResourceCreationTime, \n BackupResourceTags, and SourceResourceArns to \n filter the recovery points returned by the search \n job.

", + "smithy.api#required": {} + } + }, + "ItemFilters": { + "target": "com.amazonaws.backupsearch#ItemFilters", + "traits": { + "smithy.api#documentation": "

Item Filters represent all input item \n properties specified when the search was \n created.

\n

Contains either EBSItemFilters or \n S3ItemFilters

" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.backupsearch#StartSearchJobOutput": { + "type": "structure", + "members": { + "SearchJobArn": { + "target": "com.amazonaws.backupsearch#SearchJobArn", + "traits": { + "smithy.api#documentation": "

The unique string that identifies the Amazon Resource \n Name (ARN) of the specified search job.

" + } + }, + "CreationTime": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

The date and time that a job was created, in Unix format and Coordinated\n Universal Time (UTC). The value of CreationTime is accurate to milliseconds.\n For example, the value 1516925490.087 represents Friday, January 26, 2018 12:11:30.087\n AM.

" + } + }, + "SearchJobIdentifier": { + "target": "com.amazonaws.backupsearch#GenericId", + "traits": { + "smithy.api#documentation": "

The unique string that specifies the search job.

", + "smithy.api#notProperty": {} + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.backupsearch#StartSearchResultExportJob": { + "type": "operation", + "input": { + "target": "com.amazonaws.backupsearch#StartSearchResultExportJobInput" + }, + "output": { + "target": "com.amazonaws.backupsearch#StartSearchResultExportJobOutput" + }, + "errors": [ + { + "target": "com.amazonaws.backupsearch#ConflictException" + }, + { + "target": "com.amazonaws.backupsearch#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.backupsearch#ServiceQuotaExceededException" + } + ], + "traits": { + "smithy.api#documentation": "

This operation starts a job to export the results \n of a search job to a designated S3 bucket.

", + "smithy.api#http": { + "code": 200, + "method": "PUT", + "uri": "/export-search-jobs" + }, + "smithy.api#idempotent": {} + } + }, + "com.amazonaws.backupsearch#StartSearchResultExportJobInput": { + "type": "structure", + "members": { + "SearchJobIdentifier": { + "target": "com.amazonaws.backupsearch#GenericId", + "traits": { + "smithy.api#documentation": "

The unique string that specifies the search job.

", + "smithy.api#required": {} + } + }, + "ExportSpecification": { + "target": "com.amazonaws.backupsearch#ExportSpecification", + "traits": { + "smithy.api#documentation": "

This specification contains a required string of the \n destination bucket; optionally, you can include the \n destination prefix.

", + "smithy.api#required": {} + } + }, + "ClientToken": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

Include this parameter to allow multiple identical \n calls for idempotency.

\n

A client token is valid for 8 hours after the first \n request that uses it is completed. After this time,\n any request with the same token is treated as a \n new request.

" + } + }, + "Tags": { + "target": "com.amazonaws.backupsearch#TagMap", + "traits": { + "smithy.api#documentation": "

Optional tags to include. A tag is a key-value pair you can use to manage, \n filter, and search for your resources. Allowed characters include UTF-8 letters, \n numbers, spaces, and the following characters: + - = . _ : /.

" + } + }, + "RoleArn": { + "target": "com.amazonaws.backupsearch#IamRoleArn", + "traits": { + "smithy.api#documentation": "

This parameter specifies the role ARN used to start \n the search results export jobs.

" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.backupsearch#StartSearchResultExportJobOutput": { + "type": "structure", + "members": { + "ExportJobArn": { + "target": "com.amazonaws.backupsearch#ExportJobArn", + "traits": { + "smithy.api#documentation": "

This is the unique ARN (Amazon Resource Name) that \n belongs to the new export job.

" + } + }, + "ExportJobIdentifier": { + "target": "com.amazonaws.backupsearch#GenericId", + "traits": { + "smithy.api#documentation": "

This is the unique identifier that \n specifies the new export job.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.backupsearch#StopSearchJob": { + "type": "operation", + "input": { + "target": "com.amazonaws.backupsearch#StopSearchJobInput" + }, + "output": { + "target": "com.amazonaws.backupsearch#StopSearchJobOutput" + }, + "errors": [ + { + "target": "com.amazonaws.backupsearch#ConflictException" + }, + { + "target": "com.amazonaws.backupsearch#ResourceNotFoundException" + } + ], + "traits": { + "smithy.api#documentation": "

This operation ends a search job.

\n

Only a search job with a status of RUNNING \n can be stopped.

", + "smithy.api#http": { + "code": 200, + "method": "PUT", + "uri": "/search-jobs/{SearchJobIdentifier}/actions/cancel" + }, + "smithy.api#idempotent": {} + } + }, + "com.amazonaws.backupsearch#StopSearchJobInput": { + "type": "structure", + "members": { + "SearchJobIdentifier": { + "target": "com.amazonaws.backupsearch#GenericId", + "traits": { + "smithy.api#documentation": "

The unique string that specifies the search job.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.backupsearch#StopSearchJobOutput": { + "type": "structure", + "members": {}, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.backupsearch#StringCondition": { + "type": "structure", + "members": { + "Value": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The value of the string.

", + "smithy.api#required": {} + } + }, + "Operator": { + "target": "com.amazonaws.backupsearch#StringConditionOperator", + "traits": { + "smithy.api#default": "EQUALS_TO", + "smithy.api#documentation": "

A string that defines what values will be \n returned.

\n

If this is included, avoid combinations of \n operators that will return all possible values. \n For example, including both EQUALS_TO \n and NOT_EQUALS_TO with a value of 4 \n will return all values.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

This contains the value of the string and can contain \n one or more operators.

" + } + }, + "com.amazonaws.backupsearch#StringConditionList": { + "type": "list", + "member": { + "target": "com.amazonaws.backupsearch#StringCondition" + }, + "traits": { + "smithy.api#length": { + "min": 1, + "max": 10 + } + } + }, + "com.amazonaws.backupsearch#StringConditionOperator": { + "type": "enum", + "members": { + "EQUALS_TO": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "EQUALS_TO" + } + }, + "NOT_EQUALS_TO": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "NOT_EQUALS_TO" + } + }, + "CONTAINS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "CONTAINS" + } + }, + "DOES_NOT_CONTAIN": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "DOES_NOT_CONTAIN" + } + }, + "BEGINS_WITH": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "BEGINS_WITH" + } + }, + "ENDS_WITH": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ENDS_WITH" + } + }, + "DOES_NOT_BEGIN_WITH": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "DOES_NOT_BEGIN_WITH" + } + }, + "DOES_NOT_END_WITH": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "DOES_NOT_END_WITH" + } + } + } + }, + "com.amazonaws.backupsearch#TagKeys": { + "type": "list", + "member": { + "target": "smithy.api#String" + } + }, + "com.amazonaws.backupsearch#TagMap": { + "type": "map", + "key": { + "target": "smithy.api#String" + }, + "value": { + "target": "smithy.api#String" + }, + "traits": { + "smithy.api#sparse": {} + } + }, + "com.amazonaws.backupsearch#TagResource": { + "type": "operation", + "input": { + "target": "com.amazonaws.backupsearch#TagResourceRequest" + }, + "output": { + "target": "com.amazonaws.backupsearch#TagResourceResponse" + }, + "errors": [ + { + "target": "com.amazonaws.backupsearch#ResourceNotFoundException" + } + ], + "traits": { + "smithy.api#documentation": "

This operation puts tags on the resource you indicate.

", + "smithy.api#http": { + "uri": "/tags/{ResourceArn}", + "method": "POST" + }, + "smithy.api#idempotent": {} + } + }, + "com.amazonaws.backupsearch#TagResourceRequest": { + "type": "structure", + "members": { + "ResourceArn": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) that uniquely identifies \n the resource.

\n

This is the resource that will have the indicated tags.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "Tags": { + "target": "com.amazonaws.backupsearch#TagMap", + "traits": { + "smithy.api#documentation": "

Required tags to include. A tag is a key-value pair you can use to manage, \n filter, and search for your resources. Allowed characters include UTF-8 letters, \n numbers, spaces, and the following characters: + - = . _ : /.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.backupsearch#TagResourceResponse": { + "type": "structure", + "members": {}, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.backupsearch#ThrottlingException": { + "type": "structure", + "members": { + "message": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

Request was unsuccessful due to request throttling.

", + "smithy.api#required": {} + } + }, + "serviceCode": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

This is the code unique to the originating service.

" + } + }, + "quotaCode": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

This is the code unique to the originating service with the quota.

" + } + }, + "retryAfterSeconds": { + "target": "smithy.api#Integer", + "traits": { + "smithy.api#documentation": "

Retry the call after number of seconds.

", + "smithy.api#httpHeader": "Retry-After" + } + } + }, + "traits": { + "smithy.api#documentation": "

The request was denied due to request throttling.

", + "smithy.api#error": "client", + "smithy.api#httpError": 429, + "smithy.api#retryable": { + "throttling": true + } + } + }, + "com.amazonaws.backupsearch#TimeCondition": { + "type": "structure", + "members": { + "Value": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

This is the timestamp value of the time condition.

", + "smithy.api#required": {} + } + }, + "Operator": { + "target": "com.amazonaws.backupsearch#TimeConditionOperator", + "traits": { + "smithy.api#default": "EQUALS_TO", + "smithy.api#documentation": "

A string that defines what values will be \n returned.

\n

If this is included, avoid combinations of \n operators that will return all possible values. \n For example, including both EQUALS_TO \n and NOT_EQUALS_TO with a value of 4 \n will return all values.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

A time condition denotes a creation time, last modification time, \n or other time.

" + } + }, + "com.amazonaws.backupsearch#TimeConditionList": { + "type": "list", + "member": { + "target": "com.amazonaws.backupsearch#TimeCondition" + }, + "traits": { + "smithy.api#length": { + "min": 1, + "max": 10 + } + } + }, + "com.amazonaws.backupsearch#TimeConditionOperator": { + "type": "enum", + "members": { + "EQUALS_TO": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "EQUALS_TO" + } + }, + "NOT_EQUALS_TO": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "NOT_EQUALS_TO" + } + }, + "LESS_THAN_EQUAL_TO": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "LESS_THAN_EQUAL_TO" + } + }, + "GREATER_THAN_EQUAL_TO": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "GREATER_THAN_EQUAL_TO" + } + } + } + }, + "com.amazonaws.backupsearch#UntagResource": { + "type": "operation", + "input": { + "target": "com.amazonaws.backupsearch#UntagResourceRequest" + }, + "output": { + "target": "com.amazonaws.backupsearch#UntagResourceResponse" + }, + "errors": [ + { + "target": "com.amazonaws.backupsearch#ResourceNotFoundException" + } + ], + "traits": { + "smithy.api#documentation": "

This operation removes tags from the specified resource.

", + "smithy.api#http": { + "uri": "/tags/{ResourceArn}", + "method": "DELETE" + }, + "smithy.api#idempotent": {} + } + }, + "com.amazonaws.backupsearch#UntagResourceRequest": { + "type": "structure", + "members": { + "ResourceArn": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) that uniquely identifies \n the resource where you want to remove tags.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "TagKeys": { + "target": "com.amazonaws.backupsearch#TagKeys", + "traits": { + "smithy.api#documentation": "

This required parameter contains the tag keys you \n want to remove from the resource.

", + "smithy.api#httpQuery": "tagKeys", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.backupsearch#UntagResourceResponse": { + "type": "structure", + "members": {}, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.backupsearch#ValidationException": { + "type": "structure", + "members": { + "message": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The input fails to satisfy the constraints specified by an Amazon service.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

The input fails to satisfy the constraints specified by a service.

", + "smithy.api#error": "client", + "smithy.api#httpError": 400 + } + } + } +} \ No newline at end of file diff --git a/models/batch.json b/models/batch.json index b7de037903..7319b98ad9 100644 --- a/models/batch.json +++ b/models/batch.json @@ -1810,27 +1810,27 @@ "allocationStrategy": { "target": "com.amazonaws.batch#CRAllocationStrategy", "traits": { - "smithy.api#documentation": "

The allocation strategy to use for the compute resource if not enough instances of the best\n fitting instance type can be allocated. This might be because of availability of the instance\n type in the Region or Amazon EC2 service limits. For more\n information, see Allocation strategies in the Batch User Guide.

\n \n

This parameter isn't applicable to jobs that are running on Fargate resources. Don't specify it.

\n
\n
\n
BEST_FIT (default)
\n
\n

Batch selects an instance type that best fits the needs of the jobs with a preference\n for the lowest-cost instance type. If additional instances of the selected instance type\n aren't available, Batch waits for the additional instances to be available. If there aren't\n enough instances available or the user is reaching Amazon EC2 service limits,\n additional jobs aren't run until the currently running jobs are completed. This allocation\n strategy keeps costs lower but can limit scaling. If you're using Spot Fleets with\n BEST_FIT, the Spot Fleet IAM Role must be specified. Compute resources that use\n a BEST_FIT allocation strategy don't support infrastructure updates and can't\n update some parameters. For more information, see Updating compute environments in\n the Batch User Guide.

\n
\n
BEST_FIT_PROGRESSIVE
\n
\n

Batch selects additional instance types that are large enough to meet the requirements\n of the jobs in the queue. Its preference is for instance types with lower cost vCPUs. If\n additional instances of the previously selected instance types aren't available, Batch\n selects new instance types.

\n
\n
SPOT_CAPACITY_OPTIMIZED
\n
\n

Batch selects one or more instance types that are large enough to meet the requirements\n of the jobs in the queue. Its preference is for instance types that are less likely to be\n interrupted. This allocation strategy is only available for Spot Instance compute\n resources.

\n
\n
SPOT_PRICE_CAPACITY_OPTIMIZED
\n
\n

The price and capacity optimized allocation strategy looks at both price and capacity to\n select the Spot Instance pools that are the least likely to be interrupted and have the lowest\n possible price. This allocation strategy is only available for Spot Instance compute\n resources.

\n
\n
\n

With BEST_FIT_PROGRESSIVE,SPOT_CAPACITY_OPTIMIZED and\n SPOT_PRICE_CAPACITY_OPTIMIZED\n (recommended) strategies using On-Demand or Spot Instances, and the\n BEST_FIT strategy using Spot Instances, Batch might need to exceed\n maxvCpus to meet your capacity requirements. In this event, Batch never exceeds\n maxvCpus by more than a single instance.

" + "smithy.api#documentation": "

The allocation strategy to use for the compute resource if not enough instances of the best\n fitting instance type can be allocated. This might be because of availability of the instance\n type in the Region or Amazon EC2 service limits. For more\n information, see Allocation strategies in the Batch User Guide.

\n \n

This parameter isn't applicable to jobs that are running on Fargate resources. Don't specify it.

\n
\n
\n
BEST_FIT (default)
\n
\n

Batch selects an instance type that best fits the needs of the jobs with a preference\n for the lowest-cost instance type. If additional instances of the selected instance type\n aren't available, Batch waits for the additional instances to be available. If there aren't\n enough instances available or the user is reaching Amazon EC2 service limits,\n additional jobs aren't run until the currently running jobs are completed. This allocation\n strategy keeps costs lower but can limit scaling. If you're using Spot Fleets with\n BEST_FIT, the Spot Fleet IAM Role must be specified. Compute resources that use\n a BEST_FIT allocation strategy don't support infrastructure updates and can't\n update some parameters. For more information, see Updating compute environments in\n the Batch User Guide.

\n
\n
BEST_FIT_PROGRESSIVE
\n
\n

Batch selects additional instance types that are large enough to meet the requirements\n of the jobs in the queue. Its preference is for instance types with lower cost vCPUs. If\n additional instances of the previously selected instance types aren't available, Batch\n selects new instance types.

\n
\n
SPOT_CAPACITY_OPTIMIZED
\n
\n

Batch selects one or more instance types that are large enough to meet the requirements\n of the jobs in the queue. Its preference is for instance types that are less likely to be\n interrupted. This allocation strategy is only available for Spot Instance compute\n resources.

\n
\n
SPOT_PRICE_CAPACITY_OPTIMIZED
\n
\n

The price and capacity optimized allocation strategy looks at both price and capacity to\n select the Spot Instance pools that are the least likely to be interrupted and have the lowest\n possible price. This allocation strategy is only available for Spot Instance compute\n resources.

\n
\n
\n

With BEST_FIT_PROGRESSIVE,SPOT_CAPACITY_OPTIMIZED and\n SPOT_PRICE_CAPACITY_OPTIMIZED (recommended) strategies using On-Demand or Spot \n Instances, and the BEST_FIT strategy using Spot Instances, Batch might need to \n exceed maxvCpus to meet your capacity requirements. In this event, Batch never \n exceeds maxvCpus by more than a single instance.

" } }, "minvCpus": { "target": "com.amazonaws.batch#Integer", "traits": { - "smithy.api#documentation": "

The minimum number of\n vCPUs that\n a\n compute\n environment should maintain (even if the compute environment is DISABLED).

\n \n

This parameter isn't applicable to jobs that are running on Fargate resources. Don't specify it.

\n
" + "smithy.api#documentation": "

The minimum number of vCPUs that a compute environment should maintain (even if the compute \n environment is DISABLED).

\n \n

This parameter isn't applicable to jobs that are running on Fargate resources. Don't specify it.

\n
" } }, "maxvCpus": { "target": "com.amazonaws.batch#Integer", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

The maximum number of\n vCPUs that a\n compute environment can\n support.

\n \n

With BEST_FIT_PROGRESSIVE,SPOT_CAPACITY_OPTIMIZED and\n SPOT_PRICE_CAPACITY_OPTIMIZED\n (recommended) strategies using On-Demand or Spot Instances, and the\n BEST_FIT strategy using Spot Instances, Batch might need to exceed\n maxvCpus to meet your capacity requirements. In this event, Batch never exceeds\n maxvCpus by more than a single instance.

\n
", + "smithy.api#documentation": "

The maximum number of vCPUs that a compute environment can support.

\n \n

With BEST_FIT_PROGRESSIVE,SPOT_CAPACITY_OPTIMIZED and\n SPOT_PRICE_CAPACITY_OPTIMIZED (recommended) strategies using On-Demand or Spot Instances, \n and the BEST_FIT strategy using Spot Instances, Batch might need to exceed\n maxvCpus to meet your capacity requirements. In this event, Batch never exceeds\n maxvCpus by more than a single instance.

\n
", "smithy.api#required": {} } }, "desiredvCpus": { "target": "com.amazonaws.batch#Integer", "traits": { - "smithy.api#documentation": "

The desired number of\n vCPUS in the\n compute environment. Batch modifies this value between the minimum and maximum values based on\n job queue demand.

\n \n

This parameter isn't applicable to jobs that are running on Fargate resources. Don't specify it.

\n
" + "smithy.api#documentation": "

The desired number of vCPUS in the compute environment. Batch modifies this value between \n the minimum and maximum values based on job queue demand.

\n \n

This parameter isn't applicable to jobs that are running on Fargate resources. Don't specify it.

\n
" } }, "instanceTypes": { @@ -1889,7 +1889,7 @@ "bidPercentage": { "target": "com.amazonaws.batch#Integer", "traits": { - "smithy.api#documentation": "

The maximum percentage that a Spot Instance price can be when compared with the On-Demand\n price for that instance type before instances are launched. For example, if your maximum\n percentage is 20%, then the Spot price must be less than 20% of the current On-Demand price for\n that Amazon EC2 instance. You always pay the lowest (market) price and never more than your maximum\n percentage. If you leave this field empty, the default value is 100% of the On-Demand\n price. For most use cases,\n we recommend leaving this field empty.

\n \n

This parameter isn't applicable to jobs that are running on Fargate resources. Don't specify it.

\n
" + "smithy.api#documentation": "

The maximum percentage that a Spot Instance price can be when compared with the On-Demand\n price for that instance type before instances are launched. For example, if your maximum\n percentage is 20%, then the Spot price must be less than 20% of the current On-Demand price for\n that Amazon EC2 instance. You always pay the lowest (market) price and never more than your maximum\n percentage. If you leave this field empty, the default value is 100% of the On-Demand\n price. For most use cases, we recommend leaving this field empty.

\n \n

This parameter isn't applicable to jobs that are running on Fargate resources. Don't specify it.

\n
" } }, "spotIamFleetRole": { @@ -1921,19 +1921,19 @@ "minvCpus": { "target": "com.amazonaws.batch#Integer", "traits": { - "smithy.api#documentation": "

The minimum number of\n vCPUs that\n an environment should maintain (even if the compute environment is DISABLED).

\n \n

This parameter isn't applicable to jobs that are running on Fargate resources. Don't specify it.

\n
" + "smithy.api#documentation": "

The minimum number of vCPUs that an environment should maintain (even if the compute environment \n is DISABLED).

\n \n

This parameter isn't applicable to jobs that are running on Fargate resources. Don't specify it.

\n
" } }, "maxvCpus": { "target": "com.amazonaws.batch#Integer", "traits": { - "smithy.api#documentation": "

The maximum number of Amazon EC2 vCPUs that an environment can reach.

\n \n

With BEST_FIT_PROGRESSIVE,SPOT_CAPACITY_OPTIMIZED and\n SPOT_PRICE_CAPACITY_OPTIMIZED\n (recommended) strategies using On-Demand or Spot Instances, and the\n BEST_FIT strategy using Spot Instances, Batch might need to exceed\n maxvCpus to meet your capacity requirements. In this event, Batch never exceeds\n maxvCpus by more than a single instance.

\n
" + "smithy.api#documentation": "

The maximum number of Amazon EC2 vCPUs that an environment can reach.

\n \n

With BEST_FIT_PROGRESSIVE,SPOT_CAPACITY_OPTIMIZED and\n SPOT_PRICE_CAPACITY_OPTIMIZED (recommended) strategies using On-Demand or Spot \n Instances, and the BEST_FIT strategy using Spot Instances, Batch might need to \n exceed maxvCpus to meet your capacity requirements. In this event, Batch never \n exceeds maxvCpus by more than a single instance.

\n
" } }, "desiredvCpus": { "target": "com.amazonaws.batch#Integer", "traits": { - "smithy.api#documentation": "

The desired number of\n vCPUS in the\n compute environment. Batch modifies this value between the minimum and maximum values based on\n job queue demand.

\n \n

This parameter isn't applicable to jobs that are running on Fargate resources. Don't specify it.

\n
\n \n

Batch doesn't support changing the desired number of vCPUs of an existing compute\n environment. Don't specify this parameter for compute environments using Amazon EKS clusters.

\n
\n \n

When you update the desiredvCpus setting, the value must be between the\n minvCpus and maxvCpus values.

\n

Additionally, the updated desiredvCpus value must be greater than or equal to\n the current desiredvCpus value. For more information, see Troubleshooting\n Batch in the Batch User Guide.

\n
" + "smithy.api#documentation": "

The desired number of vCPUS in the compute environment. Batch modifies this value between \n the minimum and maximum values based on job queue demand.

\n \n

This parameter isn't applicable to jobs that are running on Fargate resources. Don't specify it.

\n
\n \n

Batch doesn't support changing the desired number of vCPUs of an existing compute\n environment. Don't specify this parameter for compute environments using Amazon EKS clusters.

\n
\n \n

When you update the desiredvCpus setting, the value must be between the\n minvCpus and maxvCpus values.

\n

Additionally, the updated desiredvCpus value must be greater than or equal to\n the current desiredvCpus value. For more information, see Troubleshooting\n Batch in the Batch User Guide.

\n
" } }, "subnets": { @@ -1951,7 +1951,7 @@ "allocationStrategy": { "target": "com.amazonaws.batch#CRUpdateAllocationStrategy", "traits": { - "smithy.api#documentation": "

The allocation strategy to use for the compute resource if there's not enough instances of\n the best fitting instance type that can be allocated. This might be because of availability of\n the instance type in the Region or Amazon EC2 service limits. For more\n information, see Allocation strategies in the Batch User Guide.

\n

When updating a compute environment, changing the allocation strategy requires an\n infrastructure update of the compute environment. For more information, see Updating compute\n environments in the Batch User Guide. BEST_FIT isn't\n supported when updating a compute environment.

\n \n

This parameter isn't applicable to jobs that are running on Fargate resources. Don't specify it.

\n
\n
\n
BEST_FIT_PROGRESSIVE
\n
\n

Batch selects additional instance types that are large enough to meet the requirements\n of the jobs in the queue. Its preference is for instance types with lower cost vCPUs. If\n additional instances of the previously selected instance types aren't available, Batch\n selects new instance types.

\n
\n
SPOT_CAPACITY_OPTIMIZED
\n
\n

Batch selects one or more instance types that are large enough to meet the requirements\n of the jobs in the queue. Its preference is for instance types that are less likely to be\n interrupted. This allocation strategy is only available for Spot Instance compute\n resources.

\n
\n
SPOT_PRICE_CAPACITY_OPTIMIZED
\n
\n

The price and capacity optimized allocation strategy looks at both price and capacity to\n select the Spot Instance pools that are the least likely to be interrupted and have the lowest\n possible price. This allocation strategy is only available for Spot Instance compute\n resources.

\n
\n
\n

With BEST_FIT_PROGRESSIVE,SPOT_CAPACITY_OPTIMIZED and\n SPOT_PRICE_CAPACITY_OPTIMIZED\n (recommended) strategies using On-Demand or Spot Instances, and the\n BEST_FIT strategy using Spot Instances, Batch might need to exceed\n maxvCpus to meet your capacity requirements. In this event, Batch never exceeds\n maxvCpus by more than a single instance.

" + "smithy.api#documentation": "

The allocation strategy to use for the compute resource if there's not enough instances of\n the best fitting instance type that can be allocated. This might be because of availability of\n the instance type in the Region or Amazon EC2 service limits. For more\n information, see Allocation strategies in the Batch User Guide.

\n

When updating a compute environment, changing the allocation strategy requires an\n infrastructure update of the compute environment. For more information, see Updating compute\n environments in the Batch User Guide. BEST_FIT isn't\n supported when updating a compute environment.

\n \n

This parameter isn't applicable to jobs that are running on Fargate resources. Don't specify it.

\n
\n
\n
BEST_FIT_PROGRESSIVE
\n
\n

Batch selects additional instance types that are large enough to meet the requirements\n of the jobs in the queue. Its preference is for instance types with lower cost vCPUs. If\n additional instances of the previously selected instance types aren't available, Batch\n selects new instance types.

\n
\n
SPOT_CAPACITY_OPTIMIZED
\n
\n

Batch selects one or more instance types that are large enough to meet the requirements\n of the jobs in the queue. Its preference is for instance types that are less likely to be\n interrupted. This allocation strategy is only available for Spot Instance compute\n resources.

\n
\n
SPOT_PRICE_CAPACITY_OPTIMIZED
\n
\n

The price and capacity optimized allocation strategy looks at both price and capacity to\n select the Spot Instance pools that are the least likely to be interrupted and have the lowest\n possible price. This allocation strategy is only available for Spot Instance compute\n resources.

\n
\n
\n

With BEST_FIT_PROGRESSIVE, SPOT_CAPACITY_OPTIMIZED and\n SPOT_PRICE_CAPACITY_OPTIMIZED (recommended) strategies using On-Demand or Spot Instances, \n and the BEST_FIT strategy using Spot Instances, Batch might need to exceed\n maxvCpus to meet your capacity requirements. In this event, Batch never exceeds\n maxvCpus by more than a single instance.

" } }, "instanceTypes": { @@ -1969,7 +1969,7 @@ "instanceRole": { "target": "com.amazonaws.batch#String", "traits": { - "smithy.api#documentation": "

The Amazon ECS instance profile applied to Amazon EC2 instances in a compute environment.\n Required for Amazon EC2\n instances. You can specify the short name or full Amazon Resource Name (ARN) of an instance\n profile. For example, \n ecsInstanceRole\n or\n arn:aws:iam:::instance-profile/ecsInstanceRole\n .\n For more information, see Amazon ECS instance role in the Batch User Guide.

\n

When updating a compute environment, changing this setting requires an infrastructure update\n of the compute environment. For more information, see Updating compute environments in the\n Batch User Guide.

\n \n

This parameter isn't applicable to jobs that are running on Fargate resources. Don't specify it.

\n
" + "smithy.api#documentation": "

The Amazon ECS instance profile applied to Amazon EC2 instances in a compute environment.\n Required for Amazon EC2 instances. You can specify the short name or full Amazon Resource Name (ARN) of an instance\n profile. For example, \n ecsInstanceRole\n or\n arn:aws:iam:::instance-profile/ecsInstanceRole\n .\n For more information, see Amazon ECS instance role in the Batch User Guide.

\n

When updating a compute environment, changing this setting requires an infrastructure update\n of the compute environment. For more information, see Updating compute environments in the\n Batch User Guide.

\n \n

This parameter isn't applicable to jobs that are running on Fargate resources. Don't specify it.

\n
" } }, "tags": { @@ -1987,7 +1987,7 @@ "bidPercentage": { "target": "com.amazonaws.batch#Integer", "traits": { - "smithy.api#documentation": "

The maximum percentage that a Spot Instance price can be when compared with the On-Demand\n price for that instance type before instances are launched. For example, if your maximum\n percentage is 20%, the Spot price must be less than 20% of the current On-Demand price for that\n Amazon EC2 instance. You always pay the lowest (market) price and never more than your maximum\n percentage. For most use\n cases, we recommend leaving this field empty.

\n

When updating a compute environment, changing the bid percentage requires an infrastructure\n update of the compute environment. For more information, see Updating compute environments in the\n Batch User Guide.

\n \n

This parameter isn't applicable to jobs that are running on Fargate resources. Don't specify it.

\n
" + "smithy.api#documentation": "

The maximum percentage that a Spot Instance price can be when compared with the On-Demand\n price for that instance type before instances are launched. For example, if your maximum\n percentage is 20%, the Spot price must be less than 20% of the current On-Demand price for that\n Amazon EC2 instance. You always pay the lowest (market) price and never more than your maximum\n percentage. For most use cases, we recommend leaving this field empty.

\n

When updating a compute environment, changing the bid percentage requires an infrastructure\n update of the compute environment. For more information, see Updating compute environments in the\n Batch User Guide.

\n \n

This parameter isn't applicable to jobs that are running on Fargate resources. Don't specify it.

\n
" } }, "launchTemplate": { @@ -2061,7 +2061,7 @@ "executionRoleArn": { "target": "com.amazonaws.batch#String", "traits": { - "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the\n execution\n role that Batch can assume. For more information,\n see Batch execution IAM\n role in the Batch User Guide.

" + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the execution role that Batch can assume. For more information,\n see Batch execution IAM\n role in the Batch User Guide.

" } }, "volumes": { @@ -2263,7 +2263,7 @@ "image": { "target": "com.amazonaws.batch#String", "traits": { - "smithy.api#documentation": "

Required.\n The image used to start a container. This string is passed directly to the\n Docker daemon. Images in the Docker Hub registry are available by default. Other repositories are\n specified with\n \n repository-url/image:tag\n .\n It can be 255 characters long. It can contain uppercase and lowercase letters, numbers,\n hyphens (-), underscores (_), colons (:), periods (.), forward slashes (/), and number signs (#). This parameter maps to Image in the\n Create a container section of the Docker Remote API and the IMAGE\n parameter of docker run.

\n \n

Docker image architecture must match the processor architecture of the compute resources\n that they're scheduled on. For example, ARM-based Docker images can only run on ARM-based\n compute resources.

\n
\n
    \n
  • \n

    Images in Amazon ECR Public repositories use the full registry/repository[:tag] or\n registry/repository[@digest] naming conventions. For example,\n public.ecr.aws/registry_alias/my-web-app:latest\n .

    \n
  • \n
  • \n

    Images in Amazon ECR repositories use the full registry and repository URI (for example,\n 123456789012.dkr.ecr..amazonaws.com/).

    \n
  • \n
  • \n

    Images in official repositories on Docker Hub use a single name (for example,\n ubuntu or mongo).

    \n
  • \n
  • \n

    Images in other repositories on Docker Hub are qualified with an organization name (for\n example, amazon/amazon-ecs-agent).

    \n
  • \n
  • \n

    Images in other online repositories are qualified further by a domain name (for example,\n quay.io/assemblyline/ubuntu).

    \n
  • \n
" + "smithy.api#documentation": "

Required. The image used to start a container. This string is passed directly to the\n Docker daemon. Images in the Docker Hub registry are available by default. Other repositories are\n specified with\n \n repository-url/image:tag\n .\n It can be 255 characters long. It can contain uppercase and lowercase letters, numbers,\n hyphens (-), underscores (_), colons (:), periods (.), forward slashes (/), and number signs (#). This parameter maps to Image in the\n Create a container section of the Docker Remote API and the IMAGE\n parameter of docker run.

\n \n

Docker image architecture must match the processor architecture of the compute resources\n that they're scheduled on. For example, ARM-based Docker images can only run on ARM-based\n compute resources.

\n
\n
    \n
  • \n

    Images in Amazon ECR Public repositories use the full registry/repository[:tag] or\n registry/repository[@digest] naming conventions. For example,\n public.ecr.aws/registry_alias/my-web-app:latest\n .

    \n
  • \n
  • \n

    Images in Amazon ECR repositories use the full registry and repository URI (for example,\n 123456789012.dkr.ecr..amazonaws.com/).

    \n
  • \n
  • \n

    Images in official repositories on Docker Hub use a single name (for example,\n ubuntu or mongo).

    \n
  • \n
  • \n

    Images in other repositories on Docker Hub are qualified with an organization name (for\n example, amazon/amazon-ecs-agent).

    \n
  • \n
  • \n

    Images in other online repositories are qualified further by a domain name (for example,\n quay.io/assemblyline/ubuntu).

    \n
  • \n
" } }, "vcpus": { @@ -2406,7 +2406,7 @@ } }, "traits": { - "smithy.api#documentation": "

Container properties are used\n for\n Amazon ECS based job definitions. These properties to describe the container that's\n launched as part of a job.

" + "smithy.api#documentation": "

Container properties are used for Amazon ECS based job definitions. These properties describe the \n container that's launched as part of a job.

" } }, "com.amazonaws.batch#ContainerSummary": { @@ -2446,7 +2446,7 @@ } ], "traits": { - "smithy.api#documentation": "

Creates an Batch compute environment. You can create MANAGED or\n UNMANAGED compute environments. MANAGED compute environments can\n use Amazon EC2 or Fargate resources. UNMANAGED compute environments can only use\n EC2 resources.

\n

In a managed compute environment, Batch manages the capacity and instance types of the\n compute resources within the environment. This is based on the compute resource specification\n that you define or the launch template that you\n specify when you create the compute environment. Either, you can choose to use EC2 On-Demand\n Instances and EC2 Spot Instances. Or, you can use Fargate and Fargate Spot capacity in\n your managed compute environment. You can optionally set a maximum price so that Spot\n Instances only launch when the Spot Instance price is less than a specified percentage of the\n On-Demand price.

\n \n

Multi-node parallel jobs aren't supported on Spot Instances.

\n
\n

In an unmanaged compute environment, you can manage your own EC2 compute resources and\n have flexibility with how you configure your compute resources. For example, you can use\n custom AMIs. However, you must verify that each of your AMIs meet the Amazon ECS container instance\n AMI specification. For more information, see container instance AMIs in the\n Amazon Elastic Container Service Developer Guide. After you created your unmanaged compute environment,\n you can use the DescribeComputeEnvironments operation to find the Amazon ECS\n cluster that's associated with it. Then, launch your container instances into that Amazon ECS\n cluster. For more information, see Launching an Amazon ECS container\n instance in the Amazon Elastic Container Service Developer Guide.

\n \n

To create a compute environment that uses EKS resources, the caller must have\n permissions to call eks:DescribeCluster.

\n
\n \n

Batch doesn't automatically upgrade the AMIs in a compute environment after it's\n created. For example, it also doesn't update the AMIs in your compute environment when a\n newer version of the Amazon ECS optimized AMI is available. You're responsible for the management\n of the guest operating system. This includes any updates and security patches. You're also\n responsible for any additional application software or utilities that you install on the\n compute resources. There are two ways to use a new AMI for your Batch jobs. The original\n method is to complete these steps:

\n
    \n
  1. \n

    Create a new compute environment with the new AMI.

    \n
  2. \n
  3. \n

    Add the compute environment to an existing job queue.

    \n
  4. \n
  5. \n

    Remove the earlier compute environment from your job queue.

    \n
  6. \n
  7. \n

    Delete the earlier compute environment.

    \n
  8. \n
\n

In April 2022, Batch added enhanced support for updating compute environments. For\n more information, see Updating compute environments.\n To use the enhanced updating of compute environments to update AMIs, follow these\n rules:

\n
    \n
  • \n

    Either don't set the service role (serviceRole) parameter or set it to\n the AWSBatchServiceRole service-linked role.

    \n
  • \n
  • \n

    Set the allocation strategy (allocationStrategy) parameter to\n BEST_FIT_PROGRESSIVE, SPOT_CAPACITY_OPTIMIZED, or\n SPOT_PRICE_CAPACITY_OPTIMIZED.

    \n
  • \n
  • \n

    Set the update to latest image version (updateToLatestImageVersion)\n parameter to\n true.\n The updateToLatestImageVersion parameter is used when you update a compute\n environment. This parameter is ignored when you create a compute\n environment.

    \n
  • \n
  • \n

    Don't specify an AMI ID in imageId, imageIdOverride (in\n \n ec2Configuration\n ), or in the launch template\n (launchTemplate). In that case, Batch selects the latest Amazon ECS\n optimized AMI that's supported by Batch at the time the infrastructure update is\n initiated. Alternatively, you can specify the AMI ID in the imageId or\n imageIdOverride parameters, or the launch template identified by the\n LaunchTemplate properties. Changing any of these properties starts an\n infrastructure update. If the AMI ID is specified in the launch template, it can't be\n replaced by specifying an AMI ID in either the imageId or\n imageIdOverride parameters. It can only be replaced by specifying a\n different launch template, or if the launch template version is set to\n $Default or $Latest, by setting either a new default version\n for the launch template (if $Default) or by adding a new version to the\n launch template (if $Latest).

    \n
  • \n
\n

If these rules are followed, any update that starts an infrastructure update causes the\n AMI ID to be re-selected. If the version setting in the launch template\n (launchTemplate) is set to $Latest or $Default, the\n latest or default version of the launch template is evaluated up at the time of the\n infrastructure update, even if the launchTemplate wasn't updated.

\n
", + "smithy.api#documentation": "

Creates an Batch compute environment. You can create MANAGED or\n UNMANAGED compute environments. MANAGED compute environments can\n use Amazon EC2 or Fargate resources. UNMANAGED compute environments can only use\n EC2 resources.

\n

In a managed compute environment, Batch manages the capacity and instance types of the\n compute resources within the environment. This is based on the compute resource specification\n that you define or the launch template that you\n specify when you create the compute environment. Either, you can choose to use EC2 On-Demand\n Instances and EC2 Spot Instances. Or, you can use Fargate and Fargate Spot capacity in\n your managed compute environment. You can optionally set a maximum price so that Spot\n Instances only launch when the Spot Instance price is less than a specified percentage of the\n On-Demand price.

\n \n

Multi-node parallel jobs aren't supported on Spot Instances.

\n
\n

In an unmanaged compute environment, you can manage your own EC2 compute resources and\n have flexibility with how you configure your compute resources. For example, you can use\n custom AMIs. However, you must verify that each of your AMIs meet the Amazon ECS container instance\n AMI specification. For more information, see container instance AMIs in the\n Amazon Elastic Container Service Developer Guide. After you created your unmanaged compute environment,\n you can use the DescribeComputeEnvironments operation to find the Amazon ECS\n cluster that's associated with it. Then, launch your container instances into that Amazon ECS\n cluster. For more information, see Launching an Amazon ECS container\n instance in the Amazon Elastic Container Service Developer Guide.

\n \n

To create a compute environment that uses EKS resources, the caller must have\n permissions to call eks:DescribeCluster.

\n
\n \n

Batch doesn't automatically upgrade the AMIs in a compute environment after it's\n created. For example, it also doesn't update the AMIs in your compute environment when a\n newer version of the Amazon ECS optimized AMI is available. You're responsible for the management\n of the guest operating system. This includes any updates and security patches. You're also\n responsible for any additional application software or utilities that you install on the\n compute resources. There are two ways to use a new AMI for your Batch jobs. The original\n method is to complete these steps:

\n
    \n
  1. \n

    Create a new compute environment with the new AMI.

    \n
  2. \n
  3. \n

    Add the compute environment to an existing job queue.

    \n
  4. \n
  5. \n

    Remove the earlier compute environment from your job queue.

    \n
  6. \n
  7. \n

    Delete the earlier compute environment.

    \n
  8. \n
\n

In April 2022, Batch added enhanced support for updating compute environments. For\n more information, see Updating compute environments.\n To use the enhanced updating of compute environments to update AMIs, follow these\n rules:

\n
    \n
  • \n

    Either don't set the service role (serviceRole) parameter or set it to\n the AWSBatchServiceRole service-linked role.

    \n
  • \n
  • \n

    Set the allocation strategy (allocationStrategy) parameter to\n BEST_FIT_PROGRESSIVE, SPOT_CAPACITY_OPTIMIZED, or\n SPOT_PRICE_CAPACITY_OPTIMIZED.

    \n
  • \n
  • \n

    Set the update to latest image version (updateToLatestImageVersion)\n parameter to true. The updateToLatestImageVersion parameter \n is used when you update a compute environment. This parameter is ignored when you create \n a compute environment.

    \n
  • \n
  • \n

    Don't specify an AMI ID in imageId, imageIdOverride (in\n \n ec2Configuration\n ), or in the launch template\n (launchTemplate). In that case, Batch selects the latest Amazon ECS\n optimized AMI that's supported by Batch at the time the infrastructure update is\n initiated. Alternatively, you can specify the AMI ID in the imageId or\n imageIdOverride parameters, or the launch template identified by the\n LaunchTemplate properties. Changing any of these properties starts an\n infrastructure update. If the AMI ID is specified in the launch template, it can't be\n replaced by specifying an AMI ID in either the imageId or\n imageIdOverride parameters. It can only be replaced by specifying a\n different launch template, or if the launch template version is set to\n $Default or $Latest, by setting either a new default version\n for the launch template (if $Default) or by adding a new version to the\n launch template (if $Latest).

    \n
  • \n
\n

If these rules are followed, any update that starts an infrastructure update causes the\n AMI ID to be re-selected. If the version setting in the launch template\n (launchTemplate) is set to $Latest or $Default, the\n latest or default version of the launch template is evaluated up at the time of the\n infrastructure update, even if the launchTemplate wasn't updated.

\n
", "smithy.api#examples": [ { "title": "To create a managed EC2 compute environment", @@ -3989,6 +3989,15 @@ "smithy.api#documentation": "

The properties for a task definition that describes the container and volume definitions of\n an Amazon ECS task. You can specify which Docker images to use, the required resources, and other\n configurations related to launching the task definition through an Amazon ECS service or task.

" } }, + "com.amazonaws.batch#EksAnnotationsMap": { + "type": "map", + "key": { + "target": "com.amazonaws.batch#String" + }, + "value": { + "target": "com.amazonaws.batch#String" + } + }, "com.amazonaws.batch#EksAttemptContainerDetail": { "type": "structure", "members": { @@ -4420,6 +4429,12 @@ "smithy.api#documentation": "

The path on the container where the volume is mounted.

" } }, + "subPath": { + "target": "com.amazonaws.batch#String", + "traits": { + "smithy.api#documentation": "

A sub-path inside the referenced volume instead of its root.

" + } + }, "readOnly": { "target": "com.amazonaws.batch#Boolean", "traits": { @@ -4503,10 +4518,44 @@ "traits": { "smithy.api#documentation": "

Key-value pairs used to identify, sort, and organize cube resources. Can contain up to 63\n uppercase letters, lowercase letters, numbers, hyphens (-), and underscores (_). Labels can be\n added or modified at any time. Each resource can have multiple labels, but each key must be\n unique for a given object.

" } + }, + "annotations": { + "target": "com.amazonaws.batch#EksAnnotationsMap", + "traits": { + "smithy.api#documentation": "

Key-value pairs used to attach arbitrary, non-identifying metadata to Kubernetes objects. \n Valid annotation keys have two segments: an optional prefix and a name, separated by a \n slash (/).

\n
    \n
  • \n

    The prefix is optional and must be 253 characters or less. If specified, the prefix \n must be a DNS subdomain — a series of DNS labels separated by dots (.), and it must \n end with a slash (/).

    \n
  • \n
  • \n

    The name segment is required and must be 63 characters or less. It can include alphanumeric \n characters ([a-z0-9A-Z]), dashes (-), underscores (_), and dots (.), but must begin and end \n with an alphanumeric character.

    \n
  • \n
\n \n

Annotation values must be 255 characters or less.

\n
\n

Annotations can be added or modified at any time. Each resource can have multiple annotations.

" + } + }, + "namespace": { + "target": "com.amazonaws.batch#String", + "traits": { + "smithy.api#documentation": "

The namespace of the Amazon EKS cluster. In Kubernetes, namespaces provide a mechanism for isolating \n groups of resources within a single cluster. Names of resources need to be unique within a namespace, \n but not across namespaces. Batch places Batch Job pods in this namespace. If this field is provided, \n the value can't be empty or null. It must meet the following requirements:

\n
    \n
  • \n

    1-63 characters long

    \n
  • \n
  • \n

    Can't be set to default

    \n
  • \n
  • \n

    Can't start with kube\n

    \n
  • \n
  • \n

    Must match the following regular expression:\n ^[a-z0-9]([-a-z0-9]*[a-z0-9])?$\n

    \n
  • \n
\n

\n For more information, see \n Namespaces in the Kubernetes documentation. This namespace can be \n different from the kubernetesNamespace set in the compute environment's \n EksConfiguration, but must have identical role-based access control (RBAC) roles as \n the compute environment's kubernetesNamespace. For multi-node parallel jobs,\n the same value must be provided across all the node ranges.

" + } } }, "traits": { - "smithy.api#documentation": "

Describes and uniquely identifies Kubernetes resources. For example, the compute environment that\n a pod runs in or the jobID for a job running in the pod. For more information, see\n Understanding Kubernetes Objects in the Kubernetes documentation.

" + "smithy.api#documentation": "

Describes and uniquely identifies Kubernetes resources. For example, the compute environment that\n a pod runs in or the jobID for a job running in the pod. For more information, see\n \n Understanding Kubernetes Objects in the Kubernetes documentation.

" + } + }, + "com.amazonaws.batch#EksPersistentVolumeClaim": { + "type": "structure", + "members": { + "claimName": { + "target": "com.amazonaws.batch#String", + "traits": { + "smithy.api#clientOptional": {}, + "smithy.api#documentation": "

The name of the persistentVolumeClaim bounded to a persistentVolume. \n For more information, see \n Persistent Volume Claims in the Kubernetes documentation.

", + "smithy.api#required": {} + } + }, + "readOnly": { + "target": "com.amazonaws.batch#Boolean", + "traits": { + "smithy.api#documentation": "

An optional boolean value indicating if the mount is read only. Default is false. For more\n information, see \n Read Only Mounts in the Kubernetes documentation.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

A persistentVolumeClaim volume is used to mount a PersistentVolume\n into a Pod. PersistentVolumeClaims are a way for users to \"claim\" durable storage without knowing \n the details of the particular cloud environment. See the information about PersistentVolumes\n in the Kubernetes documentation.

" } }, "com.amazonaws.batch#EksPodProperties": { @@ -4557,7 +4606,7 @@ "metadata": { "target": "com.amazonaws.batch#EksMetadata", "traits": { - "smithy.api#documentation": "

Metadata about the\n Kubernetes\n pod. For\n more information, see Understanding Kubernetes Objects in the Kubernetes\n documentation.

" + "smithy.api#documentation": "

Metadata about the Kubernetes pod. For more information, see Understanding Kubernetes Objects in the Kubernetes\n documentation.

" } }, "shareProcessNamespace": { @@ -4663,7 +4712,7 @@ "metadata": { "target": "com.amazonaws.batch#EksMetadata", "traits": { - "smithy.api#documentation": "

Metadata about the\n overrides for the container that's used on the Amazon EKS pod.

" + "smithy.api#documentation": "

Metadata about the overrides for the container that's used on the Amazon EKS pod.

" } } }, @@ -4772,6 +4821,12 @@ "traits": { "smithy.api#documentation": "

Specifies the configuration of a Kubernetes secret volume. For more information, see\n secret in the\n Kubernetes documentation.

" } + }, + "persistentVolumeClaim": { + "target": "com.amazonaws.batch#EksPersistentVolumeClaim", + "traits": { + "smithy.api#documentation": "

Specifies the configuration of a Kubernetes persistentVolumeClaim bounded to a \n persistentVolume. For more information, see \n Persistent Volume Claims in the Kubernetes documentation.

" + } } }, "traits": { @@ -4812,7 +4867,7 @@ "onStatusReason": { "target": "com.amazonaws.batch#String", "traits": { - "smithy.api#documentation": "

Contains a glob pattern to match against the StatusReason returned for a job.\n The pattern can contain up to 512 characters. It can contain letters, numbers, periods (.),\n colons (:), and white spaces (including spaces or tabs).\n It can\n optionally end with an asterisk (*) so that only the start of the string needs to be an exact\n match.

" + "smithy.api#documentation": "

Contains a glob pattern to match against the StatusReason returned for a job.\n The pattern can contain up to 512 characters. It can contain letters, numbers, periods (.),\n colons (:), and white spaces (including spaces or tabs). It can optionally end with an asterisk (*) \n so that only the start of the string needs to be an exact match.

" } }, "onReason": { @@ -7021,13 +7076,13 @@ "operatingSystemFamily": { "target": "com.amazonaws.batch#String", "traits": { - "smithy.api#documentation": "

The operating system for the compute environment.\n Valid values are:\n LINUX (default), WINDOWS_SERVER_2019_CORE,\n WINDOWS_SERVER_2019_FULL, WINDOWS_SERVER_2022_CORE, and\n WINDOWS_SERVER_2022_FULL.

\n \n

The following parameters can’t be set for Windows containers: linuxParameters,\n privileged, user, ulimits,\n readonlyRootFilesystem,\n and efsVolumeConfiguration.

\n
\n \n

The Batch Scheduler checks\n the compute environments\n that are attached to the job queue before registering a task definition with\n Fargate. In this\n scenario, the job queue is where the job is submitted. If the job requires a\n Windows container and the first compute environment is LINUX, the compute\n environment is skipped and the next compute environment is checked until a Windows-based compute\n environment is found.

\n
\n \n

Fargate Spot is not supported for\n ARM64 and\n Windows-based containers on Fargate. A job queue will be blocked if a\n Fargate\n ARM64 or\n Windows job is submitted to a job queue with only Fargate Spot compute environments.\n However, you can attach both FARGATE and\n FARGATE_SPOT compute environments to the same job queue.

\n
" + "smithy.api#documentation": "

The operating system for the compute environment. Valid values are:\n LINUX (default), WINDOWS_SERVER_2019_CORE,\n WINDOWS_SERVER_2019_FULL, WINDOWS_SERVER_2022_CORE, and\n WINDOWS_SERVER_2022_FULL.

\n \n

The following parameters can’t be set for Windows containers: linuxParameters,\n privileged, user, ulimits,\n readonlyRootFilesystem, and efsVolumeConfiguration.

\n
\n \n

The Batch Scheduler checks the compute environments that are attached to the job queue before \n registering a task definition with Fargate. In this scenario, the job queue is where the job is \n submitted. If the job requires a Windows container and the first compute environment is LINUX, \n the compute environment is skipped and the next compute environment is checked until a Windows-based \n compute environment is found.

\n
\n \n

Fargate Spot is not supported for ARM64 and Windows-based containers on Fargate. \n A job queue will be blocked if a Fargate ARM64 or Windows job is submitted to a job \n queue with only Fargate Spot compute environments. However, you can attach both FARGATE and\n FARGATE_SPOT compute environments to the same job queue.

\n
" } }, "cpuArchitecture": { "target": "com.amazonaws.batch#String", "traits": { - "smithy.api#documentation": "

The vCPU architecture. The default value is X86_64. Valid values are\n X86_64 and ARM64.

\n \n

This parameter must be set to\n X86_64\n for Windows containers.

\n
\n \n

Fargate Spot is not supported for ARM64 and Windows-based containers on\n Fargate. A job queue will be blocked if a Fargate ARM64 or Windows job is\n submitted to a job queue with only Fargate Spot compute environments. However, you can attach\n both FARGATE and FARGATE_SPOT compute environments to the same job\n queue.

\n
" + "smithy.api#documentation": "

The vCPU architecture. The default value is X86_64. Valid values are\n X86_64 and ARM64.

\n \n

This parameter must be set to X86_64 for Windows containers.

\n
\n \n

Fargate Spot is not supported for ARM64 and Windows-based containers on\n Fargate. A job queue will be blocked if a Fargate ARM64 or Windows job is\n submitted to a job queue with only Fargate Spot compute environments. However, you can attach\n both FARGATE and FARGATE_SPOT compute environments to the same job\n queue.

\n
" } } }, @@ -7247,7 +7302,7 @@ "schedulingPriorityOverride": { "target": "com.amazonaws.batch#Integer", "traits": { - "smithy.api#documentation": "

The scheduling priority for the job. This only affects jobs in job queues with a fair\n share policy. Jobs with a higher scheduling priority are scheduled before jobs with a lower\n scheduling priority.\n This\n overrides any scheduling priority in the job definition and works only within a single share\n identifier.

\n

The minimum supported value is 0 and the maximum supported value is 9999.

" + "smithy.api#documentation": "

The scheduling priority for the job. This only affects jobs in job queues with a fair\n share policy. Jobs with a higher scheduling priority are scheduled before jobs with a lower\n scheduling priority. This overrides any scheduling priority in the job definition and works only \n within a single share identifier.

\n

The minimum supported value is 0 and the maximum supported value is 9999.

" } }, "arrayProperties": { diff --git a/models/bcm-pricing-calculator.json b/models/bcm-pricing-calculator.json index 459da874ef..e1576c9781 100644 --- a/models/bcm-pricing-calculator.json +++ b/models/bcm-pricing-calculator.json @@ -647,7 +647,7 @@ "min": 0, "max": 32 }, - "smithy.api#pattern": "^[-a-zA-Z0-9\\.\\-_:,]*$" + "smithy.api#pattern": "^[-a-zA-Z0-9\\.\\-_:, \\/()]*$" } }, "com.amazonaws.bcmpricingcalculator#BatchCreateBillScenarioCommitmentModification": { @@ -2395,6 +2395,7 @@ "template": "bill-estimate/{billEstimateId}" }, "aws.api#taggable": {}, + "aws.iam#disableConditionKeyInference": {}, "aws.iam#iamResource": { "name": "bill-estimate" }, @@ -2941,6 +2942,7 @@ "template": "bill-scenario/{billScenarioId}" }, "aws.api#taggable": {}, + "aws.iam#disableConditionKeyInference": {}, "aws.iam#iamResource": { "name": "bill-scenario" }, @@ -5710,7 +5712,7 @@ "min": 0, "max": 32 }, - "smithy.api#pattern": "^[-a-zA-Z0-9\\.\\-_:,]*$" + "smithy.api#pattern": "^[-a-zA-Z0-9\\.\\-_:, \\/()]*$" } }, "com.amazonaws.bcmpricingcalculator#PurchaseAgreementType": { @@ -6592,7 +6594,7 @@ "min": 0, "max": 128 }, - "smithy.api#pattern": "^[-a-zA-Z0-9\\.\\-_:,]*$" + "smithy.api#pattern": "^[-a-zA-Z0-9\\.\\-_:, \\/()]*$" } }, "com.amazonaws.bcmpricingcalculator#Uuid": { @@ -6763,6 +6765,7 @@ "template": "workload-estimate/{workloadEstimateId}" }, "aws.api#taggable": {}, + "aws.iam#disableConditionKeyInference": {}, "aws.iam#iamResource": { "name": "workload-estimate" }, diff --git a/models/cleanroomsml.json b/models/cleanroomsml.json index 04abfdca6d..3ed70e8c21 100644 --- a/models/cleanroomsml.json +++ b/models/cleanroomsml.json @@ -1034,6 +1034,9 @@ "traits": { "smithy.api#documentation": "

The protected SQL query parameters.

" } + }, + "sqlComputeConfiguration": { + "target": "com.amazonaws.cleanroomsml#ComputeConfiguration" } }, "traits": { @@ -9925,7 +9928,7 @@ "dataSource": { "target": "com.amazonaws.cleanroomsml#ModelInferenceDataSource", "traits": { - "smithy.api#documentation": "

Defines he data source that is used for the trained model inference job.

", + "smithy.api#documentation": "

Defines the data source that is used for the trained model inference job.

", "smithy.api#required": {} } }, diff --git a/models/cloud9.json b/models/cloud9.json index b5b96a0925..806df005d2 100644 --- a/models/cloud9.json +++ b/models/cloud9.json @@ -85,7 +85,7 @@ "name": "cloud9" }, "aws.protocols#awsJson1_1": {}, - "smithy.api#documentation": "Cloud9\n

Cloud9 is a collection of tools that you can use to code, build, run, test, debug, and\n release software in the cloud.

\n

For more information about Cloud9, see the Cloud9 User Guide.

\n

Cloud9 supports these operations:

\n
    \n
  • \n

    \n CreateEnvironmentEC2: Creates an Cloud9 development environment, launches\n an Amazon EC2 instance, and then connects from the instance to the environment.

    \n
  • \n
  • \n

    \n CreateEnvironmentMembership: Adds an environment member to an\n environment.

    \n
  • \n
  • \n

    \n DeleteEnvironment: Deletes an environment. If an Amazon EC2 instance is\n connected to the environment, also terminates the instance.

    \n
  • \n
  • \n

    \n DeleteEnvironmentMembership: Deletes an environment member from an\n environment.

    \n
  • \n
  • \n

    \n DescribeEnvironmentMemberships: Gets information about environment\n members for an environment.

    \n
  • \n
  • \n

    \n DescribeEnvironments: Gets information about environments.

    \n
  • \n
  • \n

    \n DescribeEnvironmentStatus: Gets status information for an\n environment.

    \n
  • \n
  • \n

    \n ListEnvironments: Gets a list of environment identifiers.

    \n
  • \n
  • \n

    \n ListTagsForResource: Gets the tags for an environment.

    \n
  • \n
  • \n

    \n TagResource: Adds tags to an environment.

    \n
  • \n
  • \n

    \n UntagResource: Removes tags from an environment.

    \n
  • \n
  • \n

    \n UpdateEnvironment: Changes the settings of an existing\n environment.

    \n
  • \n
  • \n

    \n UpdateEnvironmentMembership: Changes the settings of an existing\n environment member for an environment.

    \n
  • \n
", + "smithy.api#documentation": "Cloud9\n

Cloud9 is a collection of tools that you can use to code, build, run, test, debug, and\n release software in the cloud.

\n

For more information about Cloud9, see the Cloud9 User Guide.

\n \n

Cloud9 is no longer available to new customers. Existing customers of \n Cloud9 can continue to use the service as normal. \n Learn more\n

\n
\n

Cloud9 supports these operations:

\n
    \n
  • \n

    \n CreateEnvironmentEC2: Creates an Cloud9 development environment, launches\n an Amazon EC2 instance, and then connects from the instance to the environment.

    \n
  • \n
  • \n

    \n CreateEnvironmentMembership: Adds an environment member to an\n environment.

    \n
  • \n
  • \n

    \n DeleteEnvironment: Deletes an environment. If an Amazon EC2 instance is\n connected to the environment, also terminates the instance.

    \n
  • \n
  • \n

    \n DeleteEnvironmentMembership: Deletes an environment member from an\n environment.

    \n
  • \n
  • \n

    \n DescribeEnvironmentMemberships: Gets information about environment\n members for an environment.

    \n
  • \n
  • \n

    \n DescribeEnvironments: Gets information about environments.

    \n
  • \n
  • \n

    \n DescribeEnvironmentStatus: Gets status information for an\n environment.

    \n
  • \n
  • \n

    \n ListEnvironments: Gets a list of environment identifiers.

    \n
  • \n
  • \n

    \n ListTagsForResource: Gets the tags for an environment.

    \n
  • \n
  • \n

    \n TagResource: Adds tags to an environment.

    \n
  • \n
  • \n

    \n UntagResource: Removes tags from an environment.

    \n
  • \n
  • \n

    \n UpdateEnvironment: Changes the settings of an existing\n environment.

    \n
  • \n
  • \n

    \n UpdateEnvironmentMembership: Changes the settings of an existing\n environment member for an environment.

    \n
  • \n
", "smithy.api#title": "AWS Cloud9", "smithy.rules#endpointRuleSet": { "version": "1.0", @@ -1116,7 +1116,7 @@ } ], "traits": { - "smithy.api#documentation": "

Creates an Cloud9 development environment, launches an Amazon Elastic Compute Cloud (Amazon EC2) instance, and\n then connects from the instance to the environment.

", + "smithy.api#documentation": "

Creates an Cloud9 development environment, launches an Amazon Elastic Compute Cloud (Amazon EC2) instance, and\n then connects from the instance to the environment.

\n \n

Cloud9 is no longer available to new customers. Existing customers of \n Cloud9 can continue to use the service as normal. \n Learn more\n

\n
", "smithy.api#examples": [ { "title": "CreateEnvironmentEC2", @@ -1176,7 +1176,7 @@ "imageId": { "target": "com.amazonaws.cloud9#ImageId", "traits": { - "smithy.api#documentation": "

The identifier for the Amazon Machine Image (AMI) that's used to create the EC2 instance.\n To choose an AMI for the instance, you must specify a valid AMI alias or a valid Amazon EC2 Systems Manager (SSM)\n path.

\n

From December 04, 2023, you will be required to include the imageId parameter\n for the CreateEnvironmentEC2 action. This change will be reflected across all\n direct methods of communicating with the API, such as Amazon Web Services SDK, Amazon Web Services CLI and Amazon Web Services\n CloudFormation. This change will only affect direct API consumers, and not Cloud9 console\n users.

\n

We recommend using Amazon Linux 2023 as the AMI to create your environment as it is fully\n supported.

\n

Since Ubuntu 18.04 has ended standard support as of May 31, 2023, we recommend you choose Ubuntu 22.04.

\n

\n AMI aliases \n

\n
    \n
  • \n

    Amazon Linux 2: amazonlinux-2-x86_64\n

    \n
  • \n
  • \n

    Amazon Linux 2023 (recommended): amazonlinux-2023-x86_64\n

    \n
  • \n
  • \n

    Ubuntu 18.04: ubuntu-18.04-x86_64\n

    \n
  • \n
  • \n

    Ubuntu 22.04: ubuntu-22.04-x86_64\n

    \n
  • \n
\n

\n SSM paths\n

\n
    \n
  • \n

    Amazon Linux 2:\n resolve:ssm:/aws/service/cloud9/amis/amazonlinux-2-x86_64\n

    \n
  • \n
  • \n

    Amazon Linux 2023 (recommended): resolve:ssm:/aws/service/cloud9/amis/amazonlinux-2023-x86_64\n

    \n
  • \n
  • \n

    Ubuntu 18.04:\n resolve:ssm:/aws/service/cloud9/amis/ubuntu-18.04-x86_64\n

    \n
  • \n
  • \n

    Ubuntu 22.04:\n resolve:ssm:/aws/service/cloud9/amis/ubuntu-22.04-x86_64\n

    \n
  • \n
", + "smithy.api#documentation": "

The identifier for the Amazon Machine Image (AMI) that's used to create the EC2 instance.\n To choose an AMI for the instance, you must specify a valid AMI alias or a valid Amazon EC2 Systems Manager (SSM)\n path.

\n

\n

We recommend using Amazon Linux 2023 as the AMI to create your environment as it is fully\n supported.

\n

From December 16, 2024, Ubuntu 18.04 will be removed from the list of available\n imageIds for Cloud9. This change is necessary as Ubuntu 18.04 has ended standard\n support on May 31, 2023. This change will only affect direct API consumers, and not Cloud9\n console users.

\n

Since Ubuntu 18.04 has ended standard support as of May 31, 2023, we recommend you choose\n Ubuntu 22.04.

\n

\n AMI aliases \n

\n
    \n
  • \n

    Amazon Linux 2: amazonlinux-2-x86_64\n

    \n
  • \n
  • \n

    Amazon Linux 2023 (recommended): amazonlinux-2023-x86_64\n

    \n
  • \n
  • \n

    Ubuntu 18.04: ubuntu-18.04-x86_64\n

    \n
  • \n
  • \n

    Ubuntu 22.04: ubuntu-22.04-x86_64\n

    \n
  • \n
\n

\n SSM paths\n

\n
    \n
  • \n

    Amazon Linux 2:\n resolve:ssm:/aws/service/cloud9/amis/amazonlinux-2-x86_64\n

    \n
  • \n
  • \n

    Amazon Linux 2023 (recommended):\n resolve:ssm:/aws/service/cloud9/amis/amazonlinux-2023-x86_64\n

    \n
  • \n
  • \n

    Ubuntu 18.04:\n resolve:ssm:/aws/service/cloud9/amis/ubuntu-18.04-x86_64\n

    \n
  • \n
  • \n

    Ubuntu 22.04:\n resolve:ssm:/aws/service/cloud9/amis/ubuntu-22.04-x86_64\n

    \n
  • \n
", "smithy.api#required": {} } }, @@ -1261,7 +1261,7 @@ } ], "traits": { - "smithy.api#documentation": "

Adds an environment member to an Cloud9 development environment.

", + "smithy.api#documentation": "

Adds an environment member to an Cloud9 development environment.

\n \n

Cloud9 is no longer available to new customers. Existing customers of \n Cloud9 can continue to use the service as normal. \n Learn more\n

\n
", "smithy.api#examples": [ { "title": "CreateEnvironmentMembership", @@ -1360,7 +1360,7 @@ } ], "traits": { - "smithy.api#documentation": "

Deletes an Cloud9 development environment. If an Amazon EC2 instance is connected to the\n environment, also terminates the instance.

", + "smithy.api#documentation": "

Deletes an Cloud9 development environment. If an Amazon EC2 instance is connected to the\n environment, also terminates the instance.

\n \n

Cloud9 is no longer available to new customers. Existing customers of \n Cloud9 can continue to use the service as normal. \n Learn more\n

\n
", "smithy.api#examples": [ { "title": "DeleteEnvironment", @@ -1406,7 +1406,7 @@ } ], "traits": { - "smithy.api#documentation": "

Deletes an environment member from a development environment.

", + "smithy.api#documentation": "

Deletes an environment member from a development environment.

\n \n

Cloud9 is no longer available to new customers. Existing customers of \n Cloud9 can continue to use the service as normal. \n Learn more\n

\n
", "smithy.api#examples": [ { "title": "DeleteEnvironmentMembership", @@ -1504,31 +1504,8 @@ } ], "traits": { - "smithy.api#documentation": "

Gets information about environment members for an Cloud9 development environment.

", + "smithy.api#documentation": "

Gets information about environment members for an Cloud9 development environment.

\n \n

Cloud9 is no longer available to new customers. Existing customers of \n Cloud9 can continue to use the service as normal. \n Learn more\n

\n
", "smithy.api#examples": [ - { - "title": "DescribeEnvironmentMemberships1", - "documentation": "The following example gets information about all of the environment members for the specified development environment.", - "input": { - "environmentId": "8d9967e2f0624182b74e7690ad69ebEX" - }, - "output": { - "memberships": [ - { - "environmentId": "8d9967e2f0624182b74e7690ad69ebEX", - "permissions": "read-write", - "userArn": "arn:aws:iam::123456789012:user/AnotherDemoUser", - "userId": "AIDAJ3BA6O2FMJWCWXHEX" - }, - { - "environmentId": "8d9967e2f0624182b74e7690ad69ebEX", - "permissions": "owner", - "userArn": "arn:aws:iam::123456789012:user/MyDemoUser", - "userId": "AIDAJNUEDQAQWFELJDLEX" - } - ] - } - }, { "title": "DescribeEnvironmentMemberships2", "documentation": "The following example gets information about the owner of the specified development environment.", @@ -1573,6 +1550,29 @@ } ] } + }, + { + "title": "DescribeEnvironmentMemberships1", + "documentation": "The following example gets information about all of the environment members for the specified development environment.", + "input": { + "environmentId": "8d9967e2f0624182b74e7690ad69ebEX" + }, + "output": { + "memberships": [ + { + "environmentId": "8d9967e2f0624182b74e7690ad69ebEX", + "permissions": "read-write", + "userArn": "arn:aws:iam::123456789012:user/AnotherDemoUser", + "userId": "AIDAJ3BA6O2FMJWCWXHEX" + }, + { + "environmentId": "8d9967e2f0624182b74e7690ad69ebEX", + "permissions": "owner", + "userArn": "arn:aws:iam::123456789012:user/MyDemoUser", + "userId": "AIDAJNUEDQAQWFELJDLEX" + } + ] + } } ], "smithy.api#paginated": { @@ -1672,7 +1672,7 @@ } ], "traits": { - "smithy.api#documentation": "

Gets status information for an Cloud9 development environment.

", + "smithy.api#documentation": "

Gets status information for an Cloud9 development environment.

\n \n

Cloud9 is no longer available to new customers. Existing customers of \n Cloud9 can continue to use the service as normal. \n Learn more\n

\n
", "smithy.api#examples": [ { "title": "DescribeEnvironmentStatus", @@ -1757,7 +1757,7 @@ } ], "traits": { - "smithy.api#documentation": "

Gets information about Cloud9 development environments.

", + "smithy.api#documentation": "

Gets information about Cloud9 development environments.

\n \n

Cloud9 is no longer available to new customers. Existing customers of \n Cloud9 can continue to use the service as normal. \n Learn more\n

\n
", "smithy.api#examples": [ { "title": "DescribeEnvironments", @@ -2228,7 +2228,7 @@ } ], "traits": { - "smithy.api#documentation": "

Gets a list of Cloud9 development environment identifiers.

", + "smithy.api#documentation": "

Gets a list of Cloud9 development environment identifiers.

\n \n

Cloud9 is no longer available to new customers. Existing customers of \n Cloud9 can continue to use the service as normal. \n Learn more\n

\n
", "smithy.api#examples": [ { "title": "ListEnvironments", @@ -2308,7 +2308,7 @@ } ], "traits": { - "smithy.api#documentation": "

Gets a list of the tags associated with an Cloud9 development environment.

" + "smithy.api#documentation": "

Gets a list of the tags associated with an Cloud9 development environment.

\n \n

Cloud9 is no longer available to new customers. Existing customers of \n Cloud9 can continue to use the service as normal. \n Learn more\n

\n
" } }, "com.amazonaws.cloud9#ListTagsForResourceRequest": { @@ -2602,7 +2602,7 @@ } ], "traits": { - "smithy.api#documentation": "

Adds tags to an Cloud9 development environment.

\n \n

Tags that you add to an Cloud9 environment by using this method will NOT be\n automatically propagated to underlying resources.

\n
" + "smithy.api#documentation": "

Adds tags to an Cloud9 development environment.

\n \n

Cloud9 is no longer available to new customers. Existing customers of \n Cloud9 can continue to use the service as normal. \n Learn more\n

\n
\n \n

Tags that you add to an Cloud9 environment by using this method will NOT be\n automatically propagated to underlying resources.

\n
" } }, "com.amazonaws.cloud9#TagResourceRequest": { @@ -2691,7 +2691,7 @@ } ], "traits": { - "smithy.api#documentation": "

Removes tags from an Cloud9 development environment.

" + "smithy.api#documentation": "

Removes tags from an Cloud9 development environment.

\n \n

Cloud9 is no longer available to new customers. Existing customers of \n Cloud9 can continue to use the service as normal. \n Learn more\n

\n
" } }, "com.amazonaws.cloud9#UntagResourceRequest": { @@ -2755,7 +2755,7 @@ } ], "traits": { - "smithy.api#documentation": "

Changes the settings of an existing Cloud9 development environment.

", + "smithy.api#documentation": "

Changes the settings of an existing Cloud9 development environment.

\n \n

Cloud9 is no longer available to new customers. Existing customers of \n Cloud9 can continue to use the service as normal. \n Learn more\n

\n
", "smithy.api#examples": [ { "title": "UpdateEnvironment", @@ -2803,7 +2803,7 @@ } ], "traits": { - "smithy.api#documentation": "

Changes the settings of an existing environment member for an Cloud9 development\n environment.

", + "smithy.api#documentation": "

Changes the settings of an existing environment member for an Cloud9 development\n environment.

\n \n

Cloud9 is no longer available to new customers. Existing customers of \n Cloud9 can continue to use the service as normal. \n Learn more\n

\n
", "smithy.api#examples": [ { "title": "UpdateEnvironmentMembership", diff --git a/models/cloudfront.json b/models/cloudfront.json index 389634425b..e5652f6458 100644 --- a/models/cloudfront.json +++ b/models/cloudfront.json @@ -5553,13 +5553,13 @@ "OriginReadTimeout": { "target": "com.amazonaws.cloudfront#integer", "traits": { - "smithy.api#documentation": "

Specifies how long, in seconds, CloudFront waits for a response from the origin. This is\n\t\t\talso known as the origin response timeout. The minimum timeout is 1\n\t\t\tsecond, the maximum is 60 seconds, and the default (if you don't specify otherwise) is\n\t\t\t30 seconds.

\n

For more information, see Origin Response Timeout in the\n\t\t\t\tAmazon CloudFront Developer Guide.

" + "smithy.api#documentation": "

Specifies how long, in seconds, CloudFront waits for a response from the origin. This is\n\t\t\talso known as the origin response timeout. The minimum timeout is 1\n\t\t\tsecond, the maximum is 60 seconds, and the default (if you don't specify otherwise) is\n\t\t\t30 seconds.

\n

For more information, see Response timeout (custom origins only) in the\n\t\t\t\tAmazon CloudFront Developer Guide.

" } }, "OriginKeepaliveTimeout": { "target": "com.amazonaws.cloudfront#integer", "traits": { - "smithy.api#documentation": "

Specifies how long, in seconds, CloudFront persists its connection to the origin. The\n\t\t\tminimum timeout is 1 second, the maximum is 60 seconds, and the default (if you don't\n\t\t\tspecify otherwise) is 5 seconds.

\n

For more information, see Origin Keep-alive Timeout in the\n\t\t\t\tAmazon CloudFront Developer Guide.

" + "smithy.api#documentation": "

Specifies how long, in seconds, CloudFront persists its connection to the origin. The\n\t\t\tminimum timeout is 1 second, the maximum is 60 seconds, and the default (if you don't\n\t\t\tspecify otherwise) is 5 seconds.

\n

For more information, see Keep-alive timeout (custom origins only) in the\n\t\t\t\tAmazon CloudFront Developer Guide.

" } } }, @@ -7079,7 +7079,7 @@ "DefaultRootObject": { "target": "com.amazonaws.cloudfront#string", "traits": { - "smithy.api#documentation": "

The object that you want CloudFront to request from your origin (for example,\n\t\t\t\tindex.html) when a viewer requests the root URL for your distribution\n\t\t\t\t(https://www.example.com) instead of an object in your distribution\n\t\t\t\t(https://www.example.com/product-description.html). Specifying a\n\t\t\tdefault root object avoids exposing the contents of your distribution.

\n

Specify only the object name, for example, index.html. Don't add a\n\t\t\t\t/ before the object name.

\n

If you don't want to specify a default root object when you create a distribution,\n\t\t\tinclude an empty DefaultRootObject element.

\n

To delete the default root object from an existing distribution, update the\n\t\t\tdistribution configuration and include an empty DefaultRootObject\n\t\t\telement.

\n

To replace the default root object, update the distribution configuration and specify\n\t\t\tthe new object.

\n

For more information about the default root object, see Creating a\n\t\t\t\tDefault Root Object in the Amazon CloudFront Developer Guide.

" + "smithy.api#documentation": "

When a viewer requests the root URL for your distribution, the default root object is the\n\t\t\tobject that you want CloudFront to request from your origin. For example, if your root URL is\n\t\t\t\thttps://www.example.com, you can specify CloudFront to return the\n\t\t\t\tindex.html file as the default root object. You can specify a default\n\t\t\troot object so that viewers see a specific file or object, instead of another object in\n\t\t\tyour distribution (for example,\n\t\t\t\thttps://www.example.com/product-description.html). A default root\n\t\t\tobject avoids exposing the contents of your distribution.

\n

You can specify the object name or a path to the object name (for example,\n\t\t\t\tindex.html or exampleFolderName/index.html). Your string\n\t\t\tcan't begin with a forward slash (/). Only specify the object name or the\n\t\t\tpath to the object.

\n

If you don't want to specify a default root object when you create a distribution,\n\t\t\tinclude an empty DefaultRootObject element.

\n

To delete the default root object from an existing distribution, update the\n\t\t\tdistribution configuration and include an empty DefaultRootObject\n\t\t\telement.

\n

To replace the default root object, update the distribution configuration and specify\n\t\t\tthe new object.

\n

For more information about the default root object, see Specify a default root object in the Amazon CloudFront Developer Guide.

" } }, "Origins": { @@ -20477,6 +20477,18 @@ "smithy.api#documentation": "

The VPC origin ID.

", "smithy.api#required": {} } + }, + "OriginReadTimeout": { + "target": "com.amazonaws.cloudfront#integer", + "traits": { + "smithy.api#documentation": "

Specifies how long, in seconds, CloudFront waits for a response from the origin. This is\n\t\t\talso known as the origin response timeout. The minimum timeout is 1\n\t\t\tsecond, the maximum is 60 seconds, and the default (if you don't specify otherwise) is\n\t\t\t30 seconds.

\n

For more information, see Response timeout (custom origins only) in the\n\t\t\tAmazon CloudFront Developer Guide.

" + } + }, + "OriginKeepaliveTimeout": { + "target": "com.amazonaws.cloudfront#integer", + "traits": { + "smithy.api#documentation": "

Specifies how long, in seconds, CloudFront persists its connection to the origin. The\n\t\t\tminimum timeout is 1 second, the maximum is 60 seconds, and the default (if you don't\n\t\t\tspecify otherwise) is 5 seconds.

\n

For more information, see Keep-alive timeout (custom origins only) in the\n\t\t\tAmazon CloudFront Developer Guide.

" + } } }, "traits": { diff --git a/models/cloudhsm-v2.json b/models/cloudhsm-v2.json index bd920e5178..18c34751c0 100644 --- a/models/cloudhsm-v2.json +++ b/models/cloudhsm-v2.json @@ -1350,6 +1350,18 @@ "smithy.api#error": "client" } }, + "com.amazonaws.cloudhsmv2#CloudHsmResourceLimitExceededException": { + "type": "structure", + "members": { + "Message": { + "target": "com.amazonaws.cloudhsmv2#errorMessage" + } + }, + "traits": { + "smithy.api#documentation": "

The request was rejected because it exceeds an CloudHSM limit.

", + "smithy.api#error": "client" + } + }, "com.amazonaws.cloudhsmv2#CloudHsmResourceNotFoundException": { "type": "structure", "members": { @@ -1467,6 +1479,12 @@ "smithy.api#documentation": "

The identifier (ID) of the virtual private cloud (VPC) that contains the\n cluster.

" } }, + "NetworkType": { + "target": "com.amazonaws.cloudhsmv2#NetworkType", + "traits": { + "smithy.api#documentation": "

The cluster's NetworkType can be set to either IPV4 (which is the default) or DUALSTACK.\n When set to IPV4, communication between your application and the Hardware Security Modules (HSMs) is restricted to the IPv4 protocol only.\n In contrast, the DUALSTACK network type enables communication over both the IPv4 and IPv6 protocols.\n To use the DUALSTACK option, you'll need to configure your Virtual Private Cloud (VPC) and subnets to support both IPv4 and IPv6. This involves adding IPv6 Classless Inter-Domain Routing (CIDR) blocks to the existing IPv4 CIDR blocks in your subnets.\n The choice between IPV4 and DUALSTACK network types determines the flexibility of the network addressing setup for your cluster. The DUALSTACK option provides more flexibility by allowing both IPv4 and IPv6 communication.

" + } + }, "Certificates": { "target": "com.amazonaws.cloudhsmv2#Certificates", "traits": { @@ -1552,6 +1570,18 @@ "smithy.api#enumValue": "UPDATE_IN_PROGRESS" } }, + "MODIFY_IN_PROGRESS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "MODIFY_IN_PROGRESS" + } + }, + "ROLLBACK_IN_PROGRESS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ROLLBACK_IN_PROGRESS" + } + }, "DELETE_IN_PROGRESS": { "target": "smithy.api#Unit", "traits": { @@ -1722,6 +1752,12 @@ "smithy.api#required": {} } }, + "NetworkType": { + "target": "com.amazonaws.cloudhsmv2#NetworkType", + "traits": { + "smithy.api#documentation": "

The NetworkType to create a cluster with. The allowed values are\n IPV4 and DUALSTACK.\n

" + } + }, "TagList": { "target": "com.amazonaws.cloudhsmv2#TagList", "traits": { @@ -2208,7 +2244,20 @@ "inputToken": "NextToken", "outputToken": "NextToken", "pageSize": "MaxResults" - } + }, + "smithy.test#smokeTests": [ + { + "id": "DescribeClustersSuccess", + "params": {}, + "vendorParams": { + "region": "us-west-2" + }, + "vendorParamsShape": "aws.test#AwsVendorParams", + "expect": { + "success": {} + } + } + ] } }, "com.amazonaws.cloudhsmv2#DescribeClustersRequest": { @@ -2421,6 +2470,12 @@ "smithy.api#documentation": "

The IP address of the HSM's elastic network interface (ENI).

" } }, + "EniIpV6": { + "target": "com.amazonaws.cloudhsmv2#IpV6Address", + "traits": { + "smithy.api#documentation": "

The IPv6 address (if any) of the HSM's elastic network interface (ENI).

" + } + }, "HsmId": { "target": "com.amazonaws.cloudhsmv2#HsmId", "traits": { @@ -2586,6 +2641,15 @@ "smithy.api#pattern": "^\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}$" } }, + "com.amazonaws.cloudhsmv2#IpV6Address": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 0, + "max": 100 + } + } + }, "com.amazonaws.cloudhsmv2#ListTags": { "type": "operation", "input": { @@ -2804,6 +2868,23 @@ "smithy.api#output": {} } }, + "com.amazonaws.cloudhsmv2#NetworkType": { + "type": "enum", + "members": { + "IPV4": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "IPV4" + } + }, + "DUALSTACK": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "DUALSTACK" + } + } + } + }, "com.amazonaws.cloudhsmv2#NextToken": { "type": "string", "traits": { @@ -3088,6 +3169,9 @@ { "target": "com.amazonaws.cloudhsmv2#CloudHsmInvalidRequestException" }, + { + "target": "com.amazonaws.cloudhsmv2#CloudHsmResourceLimitExceededException" + }, { "target": "com.amazonaws.cloudhsmv2#CloudHsmResourceNotFoundException" }, diff --git a/models/cloudtrail.json b/models/cloudtrail.json index eaa156e77a..cd57d4a614 100644 --- a/models/cloudtrail.json +++ b/models/cloudtrail.json @@ -251,7 +251,7 @@ } }, "traits": { - "smithy.api#documentation": "

Advanced event selectors let you create fine-grained selectors for CloudTrail management, data, and network activity events. They help you control costs by logging only those\n events that are important to you. For more information about configuring advanced event selectors, see\n the Logging data events, Logging network activity events, and Logging management events topics in the CloudTrail User Guide.

\n

You cannot apply both event selectors and advanced event selectors to a trail.

\n

\n Supported CloudTrail event record fields for management events\n

\n
    \n
  • \n

    \n eventCategory (required)

    \n
  • \n
  • \n

    \n eventSource\n

    \n
  • \n
  • \n

    \n readOnly\n

    \n
  • \n
\n

The following additional fields are available for event data stores:

\n
    \n
  • \n

    \n eventName\n

    \n
  • \n
  • \n

    \n eventType\n

    \n
  • \n
  • \n

    \n sessionCredentialFromConsole\n

    \n
  • \n
  • \n

    \n userIdentity.arn\n

    \n
  • \n
\n

\n Supported CloudTrail event record fields for data events\n

\n
    \n
  • \n

    \n eventCategory (required)

    \n
  • \n
  • \n

    \n resources.type (required)

    \n
  • \n
  • \n

    \n readOnly\n

    \n
  • \n
  • \n

    \n eventName\n

    \n
  • \n
  • \n

    \n resources.ARN\n

    \n
  • \n
\n

The following additional fields are available for event data stores:

\n
    \n
  • \n

    \n eventSource\n

    \n
  • \n
  • \n

    \n eventType\n

    \n
  • \n
  • \n

    \n sessionCredentialFromConsole\n

    \n
  • \n
  • \n

    \n userIdentity.arn\n

    \n
  • \n
\n

\n Supported CloudTrail event record fields for network activity events\n

\n \n

Network activity events is in preview release for CloudTrail and is subject to change.

\n
\n
    \n
  • \n

    \n eventCategory (required)

    \n
  • \n
  • \n

    \n eventSource (required)

    \n
  • \n
  • \n

    \n eventName\n

    \n
  • \n
  • \n

    \n errorCode - The only valid value for errorCode is VpceAccessDenied.

    \n
  • \n
  • \n

    \n vpcEndpointId\n

    \n
  • \n
\n \n

For event data stores for CloudTrail Insights events, Config configuration items, Audit Manager evidence, or events outside of Amazon Web Services, the only supported field is\n eventCategory.

\n
" + "smithy.api#documentation": "

Advanced event selectors let you create fine-grained selectors for CloudTrail management, data, and network activity events. They help you control costs by logging only those\n events that are important to you. For more information about configuring advanced event selectors, see\n the Logging data events, Logging network activity events, and Logging management events topics in the CloudTrail User Guide.

\n

You cannot apply both event selectors and advanced event selectors to a trail.

\n

For information about configurable advanced event selector fields, see \n AdvancedEventSelector \n in the CloudTrail User Guide.

" } }, "com.amazonaws.cloudtrail#AdvancedEventSelectors": { @@ -266,7 +266,7 @@ "Field": { "target": "com.amazonaws.cloudtrail#SelectorField", "traits": { - "smithy.api#documentation": "

A field in a CloudTrail event record on which to filter events to be logged. For\n event data stores for CloudTrail Insights events, Config configuration items, Audit Manager evidence, or events outside of Amazon Web Services, the field is used only for\n selecting events as filtering is not supported.

\n

For CloudTrail management events, supported fields include\n eventCategory (required), eventSource, and\n readOnly. The following additional fields are available for event data\n stores: eventName, eventType,\n sessionCredentialFromConsole, and userIdentity.arn.

\n

For CloudTrail data events, supported fields include eventCategory\n (required), resources.type (required), eventName,\n readOnly, and resources.ARN. The following additional fields\n are available for event data stores: eventSource, eventType,\n sessionCredentialFromConsole, and userIdentity.arn.

\n

For CloudTrail network activity events, supported fields include eventCategory (required), eventSource (required), eventName,\n errorCode, and vpcEndpointId.

\n

For event data stores for CloudTrail Insights events, Config configuration items, Audit Manager evidence, or events outside of Amazon Web Services, the only supported field is\n eventCategory.

\n
    \n
  • \n

    \n \n readOnly\n - This is an optional field that is only used for management events and data events. This field can be set to\n Equals with a value of true or false. If you do\n not add this field, CloudTrail logs both read and\n write events. A value of true logs only\n read events. A value of false logs only\n write events.

    \n
  • \n
  • \n

    \n \n eventSource\n - This field is only used for management events, data events (for event data stores only), and network activity events.

    \n

    For management events for trails, this is an optional field that can be set to NotEquals\n kms.amazonaws.com to exclude KMS management events, or NotEquals\n rdsdata.amazonaws.com to exclude RDS management events.

    \n

    For management and data events for event data stores, you can use it to include or\n exclude any event source and can use any operator.

    \n

    For network activity events, this is a required field that only uses the\n Equals operator. Set this field to the event source for which you want to\n log network activity events. If you want to log network activity events for multiple\n event sources, you must create a separate field selector for each event\n source.

    \n

    The following are valid values for network activity events:

    \n
      \n
    • \n

      \n cloudtrail.amazonaws.com\n

      \n
    • \n
    • \n

      \n ec2.amazonaws.com\n

      \n
    • \n
    • \n

      \n kms.amazonaws.com\n

      \n
    • \n
    • \n

      \n secretsmanager.amazonaws.com\n

      \n
    • \n
    \n
  • \n
  • \n

    \n \n eventName\n - This is an optional field that is only used for data events, management events (for event data stores only), and network activity events. You can use any operator with \n eventName. You can use it to filter in or filter out specific events. You can have\n multiple values for this field, separated by commas.

    \n
  • \n
  • \n

    \n \n eventCategory\n - This field is required and\n must be set to Equals. \n

    \n
      \n
    • \n

      \n For CloudTrail management events, the value\n must be Management. \n

      \n
    • \n
    • \n

      \n For CloudTrail data events, the value\n must be Data. \n

      \n
    • \n
    • \n

      \n For CloudTrail network activity events, the value\n must be NetworkActivity. \n

      \n
    • \n
    \n

    The following are used only for event data stores:

    \n
      \n
    • \n

      \n For CloudTrail Insights events, the value\n must be Insight. \n

      \n
    • \n
    • \n

      \n For Config\n configuration items, the value must be ConfigurationItem.\n

      \n
    • \n
    • \n

      \n For Audit Manager evidence, the value must be Evidence.\n

      \n
    • \n
    • \n

      \n For events outside of Amazon Web Services, the value must be ActivityAuditLog.\n

      \n
    • \n
    \n
  • \n
  • \n

    \n \n eventType\n - This is an optional\n field available only for event data stores, which is used to filter management and\n data events on the event type. For information about available event types, see\n CloudTrail record contents in the CloudTrail user\n guide.

    \n
  • \n
  • \n

    \n \n errorCode\n - This field is only used to filter CloudTrail network activity events\n and is optional. This is the error code to filter on. Currently, the only valid errorCode is VpceAccessDenied. \n errorCode can only use the Equals operator.

    \n
  • \n
  • \n

    \n \n sessionCredentialFromConsole\n - This\n is an optional field available only for event data stores, which is used to filter\n management and data events based on whether the events originated from an Amazon Web Services Management Console session. sessionCredentialFromConsole can only use the\n Equals and NotEquals operators.

    \n
  • \n
  • \n

    \n \n resources.type\n - This field is\n required for CloudTrail data events. resources.type can only\n use the Equals operator.

    \n

    For a list of available resource types for data events, see Data events in the CloudTrail User Guide.

    \n

    You can have only one resources.type field per selector. To log events on more than one resource type, add another selector.

    \n
  • \n
  • \n

    \n \n resources.ARN\n - The resources.ARN is an optional field for \n data events. You can use any\n operator with resources.ARN, but if you use Equals or\n NotEquals, the value must exactly match the ARN of a valid resource\n of the type you've specified in the template as the value of resources.type. To log all data events for all objects in a specific S3 bucket, \n use the StartsWith operator, and include only the bucket ARN as the matching value.

    \n

    For information about filtering data events on the resources.ARN field, see \n Filtering data \n events by resources.ARN in the CloudTrail User Guide.

    \n \n

    You can't use the resources.ARN field to filter resource types that do not have ARNs.

    \n
    \n
  • \n
  • \n

    \n \n userIdentity.arn\n - This is an\n optional field available only for event data stores, which is used to filter\n management and data events on the userIdentity ARN. You can use any operator with\n userIdentity.arn. For more information on the userIdentity element,\n see CloudTrail userIdentity element in the CloudTrail User Guide.

    \n
  • \n
  • \n

    \n \n vpcEndpointId\n - This field is only used to filter CloudTrail network activity events\n and is optional. This field identifies the VPC endpoint that the request passed through. You can use any operator with vpcEndpointId.

    \n
  • \n
", + "smithy.api#documentation": "

A field in a CloudTrail event record on which to filter events to be logged. For\n event data stores for CloudTrail Insights events, Config configuration items, Audit Manager evidence, or events outside of Amazon Web Services, the field is used only for\n selecting events as filtering is not supported.

\n

For more information, see \n AdvancedFieldSelector \n in the CloudTrail User Guide.

", "smithy.api#required": {} } }, diff --git a/models/cloudwatch-logs.json b/models/cloudwatch-logs.json index b0595b8e02..d4a1f0549b 100644 --- a/models/cloudwatch-logs.json +++ b/models/cloudwatch-logs.json @@ -5741,7 +5741,7 @@ "traits": { "smithy.api#length": { "min": 1, - "max": 256 + "max": 50 }, "smithy.api#pattern": "^[\\.\\-_/#A-Za-z0-9]+$" } @@ -5751,7 +5751,7 @@ "traits": { "smithy.api#length": { "min": 1, - "max": 256 + "max": 50 }, "smithy.api#pattern": "^[\\.\\-_/#A-Za-z0-9]+$" } diff --git a/models/codepipeline.json b/models/codepipeline.json index 2181eea8e1..edbae106e5 100644 --- a/models/codepipeline.json +++ b/models/codepipeline.json @@ -1155,7 +1155,7 @@ "category": { "target": "com.amazonaws.codepipeline#ActionCategory", "traits": { - "smithy.api#documentation": "

A category defines what kind of action can be taken in the stage, and constrains\n the provider type for the action. Valid categories are limited to one of the following\n values.

\n
    \n
  • \n

    Source

    \n
  • \n
  • \n

    Build

    \n
  • \n
  • \n

    Test

    \n
  • \n
  • \n

    Deploy

    \n
  • \n
  • \n

    Invoke

    \n
  • \n
  • \n

    Approval

    \n
  • \n
", + "smithy.api#documentation": "

A category defines what kind of action can be taken in the stage, and constrains\n the provider type for the action. Valid categories are limited to one of the following\n values.

\n
    \n
  • \n

    Source

    \n
  • \n
  • \n

    Build

    \n
  • \n
  • \n

    Test

    \n
  • \n
  • \n

    Deploy

    \n
  • \n
  • \n

    Invoke

    \n
  • \n
  • \n

    Approval

    \n
  • \n
  • \n

    Compute

    \n
  • \n
", "smithy.api#required": {} } }, @@ -2955,7 +2955,7 @@ } }, "traits": { - "smithy.api#documentation": "

The condition for the stage. A condition is made up of the rules and the result for\n the condition.

" + "smithy.api#documentation": "

The condition for the stage. A condition is made up of the rules and the result for\n the condition. For more information about conditions, see Stage conditions.\n For more information about rules, see the CodePipeline rule\n reference.

" } }, "com.amazonaws.codepipeline#ConditionExecution": { @@ -4024,7 +4024,7 @@ "category": { "target": "com.amazonaws.codepipeline#ActionCategory", "traits": { - "smithy.api#documentation": "

Defines what kind of action can be taken in the stage. The following are the valid\n values:

\n
    \n
  • \n

    \n Source\n

    \n
  • \n
  • \n

    \n Build\n

    \n
  • \n
  • \n

    \n Test\n

    \n
  • \n
  • \n

    \n Deploy\n

    \n
  • \n
  • \n

    \n Approval\n

    \n
  • \n
  • \n

    \n Invoke\n

    \n
  • \n
", + "smithy.api#documentation": "

Defines what kind of action can be taken in the stage. The following are the valid\n values:

\n
    \n
  • \n

    \n Source\n

    \n
  • \n
  • \n

    \n Build\n

    \n
  • \n
  • \n

    \n Test\n

    \n
  • \n
  • \n

    \n Deploy\n

    \n
  • \n
  • \n

    \n Approval\n

    \n
  • \n
  • \n

    \n Invoke\n

    \n
  • \n
  • \n

    \n Compute\n

    \n
  • \n
", "smithy.api#required": {} } }, @@ -5611,7 +5611,7 @@ } ], "traits": { - "smithy.api#documentation": "

Lists the rules for the condition.

" + "smithy.api#documentation": "

Lists the rules for the condition. For more information about conditions, see Stage\n conditions. For more information about rules, see the CodePipeline rule reference.

" } }, "com.amazonaws.codepipeline#ListRuleTypesInput": { @@ -8057,7 +8057,7 @@ "name": { "target": "com.amazonaws.codepipeline#RuleName", "traits": { - "smithy.api#documentation": "

The name of the rule that is created for the condition, such as\n CheckAllResults.

", + "smithy.api#documentation": "

The name of the rule that is created for the condition, such as\n VariableCheck.

", "smithy.api#required": {} } }, @@ -8074,6 +8074,12 @@ "smithy.api#documentation": "

The action configuration fields for the rule.

" } }, + "commands": { + "target": "com.amazonaws.codepipeline#CommandList", + "traits": { + "smithy.api#documentation": "

The shell commands to run with your commands rule in CodePipeline. All commands\n are supported except multi-line formats. While CodeBuild logs and permissions\n are used, you do not need to create any resources in CodeBuild.

\n \n

Using compute time for this action will incur separate charges in CodeBuild.

\n
" + } + }, "inputArtifacts": { "target": "com.amazonaws.codepipeline#InputArtifactList", "traits": { @@ -8100,7 +8106,7 @@ } }, "traits": { - "smithy.api#documentation": "

Represents information about the rule to be created for an associated condition. An\n example would be creating a new rule for an entry condition, such as a rule that checks\n for a test result before allowing the run to enter the deployment stage.

" + "smithy.api#documentation": "

Represents information about the rule to be created for an associated condition. An\n example would be creating a new rule for an entry condition, such as a rule that checks\n for a test result before allowing the run to enter the deployment stage. For more\n information about conditions, see Stage conditions.\n For more information about rules, see the CodePipeline rule\n reference.

" } }, "com.amazonaws.codepipeline#RuleDeclarationList": { diff --git a/models/cognito-identity-provider.json b/models/cognito-identity-provider.json index bcbe261af9..172de5b1d8 100644 --- a/models/cognito-identity-provider.json +++ b/models/cognito-identity-provider.json @@ -396,7 +396,7 @@ "name": "cognito-idp" }, "aws.protocols#awsJson1_1": {}, - "smithy.api#documentation": "

With the Amazon Cognito user pools API, you can configure user pools and authenticate users. To\n authenticate users from third-party identity providers (IdPs) in this API, you can\n link IdP users to native user profiles. Learn more\n about the authentication and authorization of federated users at Adding user pool sign-in through a third party and in the User pool federation endpoints and hosted UI reference.

\n

This API reference provides detailed information about API operations and object types\n in Amazon Cognito.

\n

Along with resource management operations, the Amazon Cognito user pools API includes classes\n of operations and authorization models for client-side and server-side authentication of\n users. You can interact with operations in the Amazon Cognito user pools API as any of the\n following subjects.

\n
    \n
  1. \n

    An administrator who wants to configure user pools, app clients, users,\n groups, or other user pool functions.

    \n
  2. \n
  3. \n

    A server-side app, like a web application, that wants to use its Amazon Web Services\n privileges to manage, authenticate, or authorize a user.

    \n
  4. \n
  5. \n

    A client-side app, like a mobile app, that wants to make unauthenticated\n requests to manage, authenticate, or authorize a user.

    \n
  6. \n
\n

For more information, see Using the Amazon Cognito user pools API and user pool endpoints\n in the Amazon Cognito Developer Guide.

\n

With your Amazon Web Services SDK, you can build the logic to support operational flows in every use\n case for this API. You can also make direct REST API requests to Amazon Cognito user pools service endpoints. The following links can get you started\n with the CognitoIdentityProvider client in other supported Amazon Web Services\n SDKs.

\n \n

To get started with an Amazon Web Services SDK, see Tools to Build on Amazon Web Services. For example actions and scenarios, see Code examples for Amazon Cognito Identity Provider using Amazon Web Services\n SDKs.

", + "smithy.api#documentation": "

With the Amazon Cognito user pools API, you can configure user pools and authenticate users. To\n authenticate users from third-party identity providers (IdPs) in this API, you can\n link IdP users to native user profiles. Learn more\n about the authentication and authorization of federated users at Adding user pool sign-in through a third party and in the User pool federation endpoints and hosted UI reference.

\n

This API reference provides detailed information about API operations and object types\n in Amazon Cognito.

\n

Along with resource management operations, the Amazon Cognito user pools API includes classes\n of operations and authorization models for client-side and server-side authentication of\n users. You can interact with operations in the Amazon Cognito user pools API as any of the\n following subjects.

\n
    \n
  1. \n

    An administrator who wants to configure user pools, app clients, users,\n groups, or other user pool functions.

    \n
  2. \n
  3. \n

    A server-side app, like a web application, that wants to use its Amazon Web Services\n privileges to manage, authenticate, or authorize a user.

    \n
  4. \n
  5. \n

    A client-side app, like a mobile app, that wants to make unauthenticated\n requests to manage, authenticate, or authorize a user.

    \n
  6. \n
\n

For more information, see Using the Amazon Cognito user pools API and user pool endpoints\n in the Amazon Cognito Developer Guide.

\n

With your Amazon Web Services SDK, you can build the logic to support operational flows in every use\n case for this API. You can also make direct REST API requests to Amazon Cognito user pools service endpoints. The following links can get you started\n with the CognitoIdentityProvider client in other supported Amazon Web Services\n SDKs.

\n \n

To get started with an Amazon Web Services SDK, see Tools to Build on Amazon Web Services. For example actions and scenarios, see Code examples for Amazon Cognito Identity Provider using Amazon Web Services\n SDKs.

", "smithy.api#title": "Amazon Cognito Identity Provider", "smithy.api#xmlNamespace": { "uri": "http://cognito-idp.amazonaws.com/doc/2016-04-18/" @@ -1461,7 +1461,7 @@ } ], "traits": { - "smithy.api#documentation": "

Adds additional user attributes to the user pool schema.

\n \n

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For\n this operation, you must use IAM credentials to authorize requests, and you must\n grant yourself the corresponding IAM permission in a policy.

\n

\n Learn more\n

\n \n
" + "smithy.api#documentation": "

Adds additional user attributes to the user pool schema. Custom attributes can be\n mutable or immutable and have a custom: or dev: prefix. For\n more information, see Custom attributes.

\n

You can also create custom attributes in the Schema parameter of CreateUserPool and\n UpdateUserPool. You can't delete custom attributes after you\n create them.

\n \n

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For\n this operation, you must use IAM credentials to authorize requests, and you must\n grant yourself the corresponding IAM permission in a policy.

\n

\n Learn more\n

\n \n
" } }, "com.amazonaws.cognitoidentityprovider#AddCustomAttributesRequest": { @@ -1470,14 +1470,14 @@ "UserPoolId": { "target": "com.amazonaws.cognitoidentityprovider#UserPoolIdType", "traits": { - "smithy.api#documentation": "

The user pool ID for the user pool where you want to add custom attributes.

", + "smithy.api#documentation": "

The ID of the user pool where you want to add custom attributes.

", "smithy.api#required": {} } }, "CustomAttributes": { "target": "com.amazonaws.cognitoidentityprovider#CustomAttributesListType", "traits": { - "smithy.api#documentation": "

An array of custom attributes, such as Mutable and Name.

", + "smithy.api#documentation": "

An array of custom attribute names and other properties. Sets the following\n characteristics:

\n
\n
AttributeDataType
\n
\n

The expected data type. Can be a string, a number, a date and time, or a\n boolean.

\n
\n
Mutable
\n
\n

If true, you can grant app clients write access to the attribute value. If\n false, the attribute value can only be set up on sign-up or administrator\n creation of users.

\n
\n
Name
\n
\n

The attribute name. For an attribute like custom:myAttribute,\n enter myAttribute for this field.

\n
\n
Required
\n
\n

When true, users who sign up or are created must set a value for the\n attribute.

\n
\n
NumberAttributeConstraints
\n
\n

The minimum and maximum length of accepted values for a\n Number-type attribute.

\n
\n
StringAttributeConstraints
\n
\n

The minimum and maximum length of accepted values for a\n String-type attribute.

\n
\n
DeveloperOnlyAttribute
\n
\n

This legacy option creates an attribute with a dev: prefix.\n You can only set the value of a developer-only attribute with administrative\n IAM credentials.

\n
\n
", "smithy.api#required": {} } } @@ -1533,7 +1533,7 @@ "UserPoolId": { "target": "com.amazonaws.cognitoidentityprovider#UserPoolIdType", "traits": { - "smithy.api#documentation": "

The user pool ID for the user pool.

", + "smithy.api#documentation": "

The ID of the user pool that contains the group that you want to add the user\n to.

", "smithy.api#required": {} } }, @@ -1600,7 +1600,7 @@ } ], "traits": { - "smithy.api#documentation": "

This IAM-authenticated API operation confirms user sign-up as an administrator.\n Unlike ConfirmSignUp, your IAM credentials authorize user account confirmation.\n No confirmation code is required.

\n

This request sets a user account active in a user pool that requires confirmation of new user accounts before they can sign in. You can\n configure your user pool to not send confirmation codes to new users and instead confirm\n them with this API operation on the back end.

\n \n

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For\n this operation, you must use IAM credentials to authorize requests, and you must\n grant yourself the corresponding IAM permission in a policy.

\n

\n Learn more\n

\n \n
" + "smithy.api#documentation": "

Confirms user sign-up as an administrator. Unlike ConfirmSignUp, your IAM credentials authorize user account confirmation.\n No confirmation code is required.

\n

This request sets a user account active in a user pool that requires confirmation of new user accounts before they can sign in. You can\n configure your user pool to not send confirmation codes to new users and instead confirm\n them with this API operation on the back end.

\n \n

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For\n this operation, you must use IAM credentials to authorize requests, and you must\n grant yourself the corresponding IAM permission in a policy.

\n

\n Learn more\n

\n \n
\n

To configure your user pool to require administrative confirmation of users, set\n AllowAdminCreateUserOnly to true in a\n CreateUserPool or UpdateUserPool request.

" } }, "com.amazonaws.cognitoidentityprovider#AdminConfirmSignUpRequest": { @@ -1609,7 +1609,7 @@ "UserPoolId": { "target": "com.amazonaws.cognitoidentityprovider#UserPoolIdType", "traits": { - "smithy.api#documentation": "

The user pool ID for which you want to confirm user registration.

", + "smithy.api#documentation": "

The ID of the user pool where you want to confirm a user's sign-up\n request.

", "smithy.api#required": {} } }, @@ -1623,7 +1623,7 @@ "ClientMetadata": { "target": "com.amazonaws.cognitoidentityprovider#ClientMetadataType", "traits": { - "smithy.api#documentation": "

A map of custom key-value pairs that you can provide as input for any custom workflows\n that this action triggers.

\n

If your user pool configuration includes triggers, the AdminConfirmSignUp API action\n invokes the Lambda function that is specified for the post\n confirmation trigger. When Amazon Cognito invokes this function, it passes a JSON\n payload, which the function receives as input. In this payload, the\n clientMetadata attribute provides the data that you assigned to the\n ClientMetadata parameter in your AdminConfirmSignUp request. In your function code in\n Lambda, you can process the ClientMetadata value to enhance your workflow for your\n specific needs.

\n

For more information, see \nCustomizing user pool Workflows with Lambda Triggers in the Amazon Cognito Developer Guide.

\n \n

When you use the ClientMetadata parameter, remember that Amazon Cognito won't do the\n following:

\n
    \n
  • \n

    Store the ClientMetadata value. This data is available only to Lambda\n triggers that are assigned to a user pool to support custom workflows. If\n your user pool configuration doesn't include triggers, the ClientMetadata\n parameter serves no purpose.

    \n
  • \n
  • \n

    Validate the ClientMetadata value.

    \n
  • \n
  • \n

    Encrypt the ClientMetadata value. Don't use Amazon Cognito to provide sensitive\n information.

    \n
  • \n
\n
" + "smithy.api#documentation": "

A map of custom key-value pairs that you can provide as input for any custom workflows\n that this action triggers.

\n

If your user pool configuration includes triggers, the AdminConfirmSignUp API action\n invokes the Lambda function that is specified for the post\n confirmation trigger. When Amazon Cognito invokes this function, it passes a JSON\n payload, which the function receives as input. In this payload, the\n clientMetadata attribute provides the data that you assigned to the\n ClientMetadata parameter in your AdminConfirmSignUp request. In your function code in\n Lambda, you can process the ClientMetadata value to enhance your workflow for your\n specific needs.

\n

For more information, see \nCustomizing user pool Workflows with Lambda Triggers in the Amazon Cognito Developer Guide.

\n \n

When you use the ClientMetadata parameter, note that Amazon Cognito won't do the\n following:

\n
    \n
  • \n

    Store the ClientMetadata value. This data is available only\n to Lambda triggers that are assigned to a user pool to support custom\n workflows. If your user pool configuration doesn't include triggers, the\n ClientMetadata parameter serves no purpose.

    \n
  • \n
  • \n

    Validate the ClientMetadata value.

    \n
  • \n
  • \n

    Encrypt the ClientMetadata value. Don't send sensitive\n information in this parameter.

    \n
  • \n
\n
" } } }, @@ -1792,7 +1792,7 @@ "UserPoolId": { "target": "com.amazonaws.cognitoidentityprovider#UserPoolIdType", "traits": { - "smithy.api#documentation": "

The user pool ID for the user pool where the user will be created.

", + "smithy.api#documentation": "

The ID of the user pool where you want to create a user.

", "smithy.api#required": {} } }, @@ -1825,25 +1825,25 @@ "target": "com.amazonaws.cognitoidentityprovider#ForceAliasCreation", "traits": { "smithy.api#default": false, - "smithy.api#documentation": "

This parameter is used only if the phone_number_verified or\n email_verified attribute is set to True. Otherwise, it is\n ignored.

\n

If this parameter is set to True and the phone number or email address\n specified in the UserAttributes parameter already exists as an alias with a different\n user, the API call will migrate the alias from the previous user to the newly created\n user. The previous user will no longer be able to log in using that alias.

\n

If this parameter is set to False, the API throws an\n AliasExistsException error if the alias already exists. The default\n value is False.

" + "smithy.api#documentation": "

This parameter is used only if the phone_number_verified or\n email_verified attribute is set to True. Otherwise, it is\n ignored.

\n

If this parameter is set to True and the phone number or email address\n specified in the UserAttributes parameter already exists as an alias with a\n different user, this request migrates the alias from the previous user to the\n newly-created user. The previous user will no longer be able to log in using that\n alias.

\n

If this parameter is set to False, the API throws an\n AliasExistsException error if the alias already exists. The default\n value is False.

" } }, "MessageAction": { "target": "com.amazonaws.cognitoidentityprovider#MessageActionType", "traits": { - "smithy.api#documentation": "

Set to RESEND to resend the invitation message to a user that already\n exists and reset the expiration limit on the user's account. Set to\n SUPPRESS to suppress sending the message. You can specify only one\n value.

" + "smithy.api#documentation": "

Set to RESEND to resend the invitation message to a user that already\n exists, and to reset the temporary-password duration with a new temporary password. Set\n to SUPPRESS to suppress sending the message. You can specify only one\n value.

" } }, "DesiredDeliveryMediums": { "target": "com.amazonaws.cognitoidentityprovider#DeliveryMediumListType", "traits": { - "smithy.api#documentation": "

Specify \"EMAIL\" if email will be used to send the welcome message.\n Specify \"SMS\" if the phone number will be used. The default value is\n \"SMS\". You can specify more than one value.

" + "smithy.api#documentation": "

Specify EMAIL if email will be used to send the welcome message. Specify\n SMS if the phone number will be used. The default value is\n SMS. You can specify more than one value.

" } }, "ClientMetadata": { "target": "com.amazonaws.cognitoidentityprovider#ClientMetadataType", "traits": { - "smithy.api#documentation": "

A map of custom key-value pairs that you can provide as input for any custom workflows\n that this action triggers.

\n

You create custom workflows by assigning Lambda functions to user pool triggers.\n When you use the AdminCreateUser API action, Amazon Cognito invokes the function that is assigned\n to the pre sign-up trigger. When Amazon Cognito invokes this function, it\n passes a JSON payload, which the function receives as input. This payload contains a\n clientMetadata attribute, which provides the data that you assigned to\n the ClientMetadata parameter in your AdminCreateUser request. In your function code in\n Lambda, you can process the clientMetadata value to enhance your\n workflow for your specific needs.

\n

For more information, see \nCustomizing user pool Workflows with Lambda Triggers in the Amazon Cognito Developer Guide.

\n \n

When you use the ClientMetadata parameter, remember that Amazon Cognito won't do the\n following:

\n
    \n
  • \n

    Store the ClientMetadata value. This data is available only to Lambda\n triggers that are assigned to a user pool to support custom workflows. If\n your user pool configuration doesn't include triggers, the ClientMetadata\n parameter serves no purpose.

    \n
  • \n
  • \n

    Validate the ClientMetadata value.

    \n
  • \n
  • \n

    Encrypt the ClientMetadata value. Don't use Amazon Cognito to provide sensitive\n information.

    \n
  • \n
\n
" + "smithy.api#documentation": "

A map of custom key-value pairs that you can provide as input for any custom workflows\n that this action triggers.

\n

You create custom workflows by assigning Lambda functions to user pool triggers.\n When you use the AdminCreateUser API action, Amazon Cognito invokes the function that is assigned\n to the pre sign-up trigger. When Amazon Cognito invokes this function, it\n passes a JSON payload, which the function receives as input. This payload contains a\n ClientMetadata attribute, which provides the data that you assigned to\n the ClientMetadata parameter in your AdminCreateUser request. In your function code in\n Lambda, you can process the clientMetadata value to enhance your\n workflow for your specific needs.

\n

For more information, see \nCustomizing user pool Workflows with Lambda Triggers in the Amazon Cognito Developer Guide.

\n \n

When you use the ClientMetadata parameter, note that Amazon Cognito won't do the\n following:

\n
    \n
  • \n

    Store the ClientMetadata value. This data is available only\n to Lambda triggers that are assigned to a user pool to support custom\n workflows. If your user pool configuration doesn't include triggers, the\n ClientMetadata parameter serves no purpose.

    \n
  • \n
  • \n

    Validate the ClientMetadata value.

    \n
  • \n
  • \n

    Encrypt the ClientMetadata value. Don't send sensitive\n information in this parameter.

    \n
  • \n
\n
" } } }, @@ -1858,7 +1858,7 @@ "User": { "target": "com.amazonaws.cognitoidentityprovider#UserType", "traits": { - "smithy.api#documentation": "

The newly created user.

" + "smithy.api#documentation": "

The new user's profile details.

" } } }, @@ -1906,7 +1906,7 @@ } ], "traits": { - "smithy.api#documentation": "

Deletes a user as an administrator. Works on any user.

\n \n

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For\n this operation, you must use IAM credentials to authorize requests, and you must\n grant yourself the corresponding IAM permission in a policy.

\n

\n Learn more\n

\n \n
" + "smithy.api#documentation": "

Deletes a user profile in your user pool.

\n \n

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For\n this operation, you must use IAM credentials to authorize requests, and you must\n grant yourself the corresponding IAM permission in a policy.

\n

\n Learn more\n

\n \n
" } }, "com.amazonaws.cognitoidentityprovider#AdminDeleteUserAttributes": { @@ -1938,7 +1938,7 @@ } ], "traits": { - "smithy.api#documentation": "

Deletes the user attributes in a user pool as an administrator. Works on any\n user.

\n \n

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For\n this operation, you must use IAM credentials to authorize requests, and you must\n grant yourself the corresponding IAM permission in a policy.

\n

\n Learn more\n

\n \n
" + "smithy.api#documentation": "

Deletes attribute values from a user. This operation doesn't affect tokens for\n existing user sessions. The next ID token that the user receives will no longer have\n this attribute.

\n \n

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For\n this operation, you must use IAM credentials to authorize requests, and you must\n grant yourself the corresponding IAM permission in a policy.

\n

\n Learn more\n

\n \n
" } }, "com.amazonaws.cognitoidentityprovider#AdminDeleteUserAttributesRequest": { @@ -1947,7 +1947,7 @@ "UserPoolId": { "target": "com.amazonaws.cognitoidentityprovider#UserPoolIdType", "traits": { - "smithy.api#documentation": "

The user pool ID for the user pool where you want to delete user attributes.

", + "smithy.api#documentation": "

The ID of the user pool where you want to delete user attributes.

", "smithy.api#required": {} } }, @@ -1985,7 +1985,7 @@ "UserPoolId": { "target": "com.amazonaws.cognitoidentityprovider#UserPoolIdType", "traits": { - "smithy.api#documentation": "

The user pool ID for the user pool where you want to delete the user.

", + "smithy.api#documentation": "

The ID of the user pool where you want to delete the user.

", "smithy.api#required": {} } }, @@ -2043,14 +2043,14 @@ "UserPoolId": { "target": "com.amazonaws.cognitoidentityprovider#StringType", "traits": { - "smithy.api#documentation": "

The user pool ID for the user pool.

", + "smithy.api#documentation": "

The ID of the user pool where you want to delete the user's linked\n identities.

", "smithy.api#required": {} } }, "User": { "target": "com.amazonaws.cognitoidentityprovider#ProviderUserIdentifierType", "traits": { - "smithy.api#documentation": "

The user to be disabled.

", + "smithy.api#documentation": "

The user profile that you want to delete a linked identity from.

", "smithy.api#required": {} } } @@ -2095,7 +2095,7 @@ } ], "traits": { - "smithy.api#documentation": "

Deactivates a user and revokes all access tokens for the user. A deactivated user\n can't sign in, but still appears in the responses to GetUser and\n ListUsers API requests.

\n \n

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For\n this operation, you must use IAM credentials to authorize requests, and you must\n grant yourself the corresponding IAM permission in a policy.

\n

\n Learn more\n

\n \n
" + "smithy.api#documentation": "

Deactivates a user profile and revokes all access tokens for the user. A deactivated\n user can't sign in, but still appears in the responses to ListUsers\n API requests.

\n \n

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For\n this operation, you must use IAM credentials to authorize requests, and you must\n grant yourself the corresponding IAM permission in a policy.

\n

\n Learn more\n

\n \n
" } }, "com.amazonaws.cognitoidentityprovider#AdminDisableUserRequest": { @@ -2104,7 +2104,7 @@ "UserPoolId": { "target": "com.amazonaws.cognitoidentityprovider#UserPoolIdType", "traits": { - "smithy.api#documentation": "

The user pool ID for the user pool where you want to disable the user.

", + "smithy.api#documentation": "

The ID of the user pool where you want to disable the user.

", "smithy.api#required": {} } }, @@ -2158,7 +2158,7 @@ } ], "traits": { - "smithy.api#documentation": "

Enables the specified user as an administrator. Works on any user.

\n \n

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For\n this operation, you must use IAM credentials to authorize requests, and you must\n grant yourself the corresponding IAM permission in a policy.

\n

\n Learn more\n

\n \n
" + "smithy.api#documentation": "

Activate sign-in for a user profile that previously had sign-in access\n disabled.

\n \n

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For\n this operation, you must use IAM credentials to authorize requests, and you must\n grant yourself the corresponding IAM permission in a policy.

\n

\n Learn more\n

\n \n
" } }, "com.amazonaws.cognitoidentityprovider#AdminEnableUserRequest": { @@ -2167,7 +2167,7 @@ "UserPoolId": { "target": "com.amazonaws.cognitoidentityprovider#UserPoolIdType", "traits": { - "smithy.api#documentation": "

The user pool ID for the user pool where you want to enable the user.

", + "smithy.api#documentation": "

The ID of the user pool where you want to activate sign-in for the user.

", "smithy.api#required": {} } }, @@ -2224,7 +2224,7 @@ } ], "traits": { - "smithy.api#documentation": "

Forgets the device, as an administrator.

\n \n

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For\n this operation, you must use IAM credentials to authorize requests, and you must\n grant yourself the corresponding IAM permission in a policy.

\n

\n Learn more\n

\n \n
" + "smithy.api#documentation": "

Forgets, or deletes, a remembered device from a user's profile. After you forget\n the device, the user can no longer complete device authentication with that device and\n when applicable, must submit MFA codes again. For more information, see Working with devices.

\n \n

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For\n this operation, you must use IAM credentials to authorize requests, and you must\n grant yourself the corresponding IAM permission in a policy.

\n

\n Learn more\n

\n \n
" } }, "com.amazonaws.cognitoidentityprovider#AdminForgetDeviceRequest": { @@ -2233,7 +2233,7 @@ "UserPoolId": { "target": "com.amazonaws.cognitoidentityprovider#UserPoolIdType", "traits": { - "smithy.api#documentation": "

The user pool ID.

", + "smithy.api#documentation": "

The ID of the user pool where the device owner is a user.

", "smithy.api#required": {} } }, @@ -2247,7 +2247,7 @@ "DeviceKey": { "target": "com.amazonaws.cognitoidentityprovider#DeviceKeyType", "traits": { - "smithy.api#documentation": "

The device key.

", + "smithy.api#documentation": "

The key ID of the device that you want to delete. You can get device keys in the\n response to an AdminListDevices request.

", "smithy.api#required": {} } } @@ -2286,7 +2286,7 @@ } ], "traits": { - "smithy.api#documentation": "

Gets the device, as an administrator.

\n \n

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For\n this operation, you must use IAM credentials to authorize requests, and you must\n grant yourself the corresponding IAM permission in a policy.

\n

\n Learn more\n

\n \n
" + "smithy.api#documentation": "

Given the device key, returns details for a user's device. For more information,\n see Working with devices.

\n \n

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For\n this operation, you must use IAM credentials to authorize requests, and you must\n grant yourself the corresponding IAM permission in a policy.

\n

\n Learn more\n

\n \n
" } }, "com.amazonaws.cognitoidentityprovider#AdminGetDeviceRequest": { @@ -2295,14 +2295,14 @@ "DeviceKey": { "target": "com.amazonaws.cognitoidentityprovider#DeviceKeyType", "traits": { - "smithy.api#documentation": "

The device key.

", + "smithy.api#documentation": "

The key of the device that you want to get information about. You can get device IDs in the response\n to an AdminListDevices request.

", "smithy.api#required": {} } }, "UserPoolId": { "target": "com.amazonaws.cognitoidentityprovider#UserPoolIdType", "traits": { - "smithy.api#documentation": "

The user pool ID.

", + "smithy.api#documentation": "

The ID of the user pool where the device owner is a user.

", "smithy.api#required": {} } }, @@ -2325,7 +2325,7 @@ "Device": { "target": "com.amazonaws.cognitoidentityprovider#DeviceType", "traits": { - "smithy.api#documentation": "

The device.

", + "smithy.api#documentation": "

Details of the requested device. Includes device information, last-accessed and\n created dates, and the device key.

", "smithy.api#required": {} } } @@ -2364,7 +2364,7 @@ } ], "traits": { - "smithy.api#documentation": "

Gets the specified user by user name in a user pool as an administrator. Works on any\n user. This operation contributes to your monthly active user (MAU) count for the purpose\n of billing.

\n \n

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For\n this operation, you must use IAM credentials to authorize requests, and you must\n grant yourself the corresponding IAM permission in a policy.

\n

\n Learn more\n

\n \n
" + "smithy.api#documentation": "

Given the username, returns details about a user profile in a user pool. This\n operation contributes to your monthly active user (MAU) count for the purpose of\n billing. You can specify alias attributes in the Username parameter.

\n \n

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For\n this operation, you must use IAM credentials to authorize requests, and you must\n grant yourself the corresponding IAM permission in a policy.

\n

\n Learn more\n

\n \n
" } }, "com.amazonaws.cognitoidentityprovider#AdminGetUserRequest": { @@ -2373,7 +2373,7 @@ "UserPoolId": { "target": "com.amazonaws.cognitoidentityprovider#UserPoolIdType", "traits": { - "smithy.api#documentation": "

The user pool ID for the user pool where you want to get information about the\n user.

", + "smithy.api#documentation": "

The ID of the user pool where you want to get information about the user.

", "smithy.api#required": {} } }, @@ -2403,13 +2403,13 @@ "UserAttributes": { "target": "com.amazonaws.cognitoidentityprovider#AttributeListType", "traits": { - "smithy.api#documentation": "

An array of name-value pairs representing user attributes.

" + "smithy.api#documentation": "

An array of name-value pairs of user attributes and their values, for example\n \"email\": \"testuser@example.com\".

" } }, "UserCreateDate": { "target": "com.amazonaws.cognitoidentityprovider#DateType", "traits": { - "smithy.api#documentation": "

The date the user was created.

" + "smithy.api#documentation": "

The date and time when the item was created. Amazon Cognito returns this timestamp in UNIX epoch time format. Your SDK might render the output in a \nhuman-readable format like ISO 8601 or a Java Date object.

" } }, "UserLastModifiedDate": { @@ -2422,13 +2422,13 @@ "target": "com.amazonaws.cognitoidentityprovider#BooleanType", "traits": { "smithy.api#default": false, - "smithy.api#documentation": "

Indicates that the status is enabled.

" + "smithy.api#documentation": "

Indicates whether the user is activated for sign-in. The AdminDisableUser and AdminEnableUser API operations deactivate and activate\n user sign-in, respectively.

" } }, "UserStatus": { "target": "com.amazonaws.cognitoidentityprovider#UserStatusType", "traits": { - "smithy.api#documentation": "

The user status. Can be one of the following:

\n
    \n
  • \n

    UNCONFIRMED - User has been created but not confirmed.

    \n
  • \n
  • \n

    CONFIRMED - User has been confirmed.

    \n
  • \n
  • \n

    UNKNOWN - User status isn't known.

    \n
  • \n
  • \n

    RESET_REQUIRED - User is confirmed, but the user must request a code and reset\n their password before they can sign in.

    \n
  • \n
  • \n

    FORCE_CHANGE_PASSWORD - The user is confirmed and the user can sign in using a\n temporary password, but on first sign-in, the user must change their password to\n a new value before doing anything else.

    \n
  • \n
" + "smithy.api#documentation": "

The user's status. Can be one of the following:

\n
    \n
  • \n

    UNCONFIRMED - User has been created but not confirmed.

    \n
  • \n
  • \n

    CONFIRMED - User has been confirmed.

    \n
  • \n
  • \n

    UNKNOWN - User status isn't known.

    \n
  • \n
  • \n

    RESET_REQUIRED - User is confirmed, but the user must request a code and reset\n their password before they can sign in.

    \n
  • \n
  • \n

    FORCE_CHANGE_PASSWORD - The user is confirmed and the user can sign in using a\n temporary password, but on first sign-in, the user must change their password to\n a new value before doing anything else.

    \n
  • \n
  • \n

    EXTERNAL_PROVIDER - The user signed in with a third-party identity\n provider.

    \n
  • \n
" } }, "MFAOptions": { @@ -2440,13 +2440,13 @@ "PreferredMfaSetting": { "target": "com.amazonaws.cognitoidentityprovider#StringType", "traits": { - "smithy.api#documentation": "

The user's preferred MFA setting.

" + "smithy.api#documentation": "

The user's preferred MFA. Users can prefer SMS message, email message, or TOTP\n MFA.

" } }, "UserMFASettingList": { "target": "com.amazonaws.cognitoidentityprovider#UserMFASettingListType", "traits": { - "smithy.api#documentation": "

The MFA options that are activated for the user. The possible values in this list are\n SMS_MFA, EMAIL_OTP, and\n SOFTWARE_TOKEN_MFA.

" + "smithy.api#documentation": "

The MFA options that are activated for the user. The possible values in this list are\n SMS_MFA, EMAIL_OTP, and SOFTWARE_TOKEN_MFA.\n You can change the MFA preference for users who have more than one available MFA factor\n with AdminSetUserMFAPreference or SetUserMFAPreference.

" } } }, @@ -2514,7 +2514,7 @@ } ], "traits": { - "smithy.api#documentation": "

Initiates the authentication flow, as an administrator.

\n \n

This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers\n require you to register an origination phone number before you can send SMS messages\n to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a\n phone number with Amazon Pinpoint.\n Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must\n receive SMS messages might not be able to sign up, activate their accounts, or sign\n in.

\n

If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Services service,\n Amazon Simple Notification Service might place your account in the SMS sandbox. In \n sandbox\n mode\n , you can send messages only to verified phone\n numbers. After you test your app while in the sandbox environment, you can move out\n of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito\n Developer Guide.

\n
\n \n

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For\n this operation, you must use IAM credentials to authorize requests, and you must\n grant yourself the corresponding IAM permission in a policy.

\n

\n Learn more\n

\n \n
" + "smithy.api#documentation": "

Starts sign-in for applications with a server-side component, for example a\n traditional web application. This operation specifies the authentication flow that\n you'd like to begin. The authentication flow that you specify must be supported in\n your app client configuration. For more information about authentication flows, see\n Authentication flows.

\n \n

This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers\n require you to register an origination phone number before you can send SMS messages\n to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a\n phone number with Amazon Pinpoint.\n Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must\n receive SMS messages might not be able to sign up, activate their accounts, or sign\n in.

\n

If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Services service,\n Amazon Simple Notification Service might place your account in the SMS sandbox. In \n sandbox\n mode\n , you can send messages only to verified phone\n numbers. After you test your app while in the sandbox environment, you can move out\n of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito\n Developer Guide.

\n
\n \n

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For\n this operation, you must use IAM credentials to authorize requests, and you must\n grant yourself the corresponding IAM permission in a policy.

\n

\n Learn more\n

\n \n
" } }, "com.amazonaws.cognitoidentityprovider#AdminInitiateAuthRequest": { @@ -2523,21 +2523,21 @@ "UserPoolId": { "target": "com.amazonaws.cognitoidentityprovider#UserPoolIdType", "traits": { - "smithy.api#documentation": "

The ID of the Amazon Cognito user pool.

", + "smithy.api#documentation": "

The ID of the user pool where the user wants to sign in.

", "smithy.api#required": {} } }, "ClientId": { "target": "com.amazonaws.cognitoidentityprovider#ClientIdType", "traits": { - "smithy.api#documentation": "

The app client ID.

", + "smithy.api#documentation": "

The ID of the app client where the user wants to sign in.

", "smithy.api#required": {} } }, "AuthFlow": { "target": "com.amazonaws.cognitoidentityprovider#AuthFlowType", "traits": { - "smithy.api#documentation": "

The authentication flow that you want to initiate. The AuthParameters\n that you must submit are linked to the flow that you submit. For example:

\n
    \n
  • \n

    \n USER_AUTH: Request a preferred authentication type or review\n available authentication types. From the offered authentication types, select\n one in a challenge response and then authenticate with that method in an\n additional challenge response.

    \n
  • \n
  • \n

    \n REFRESH_TOKEN_AUTH: Receive new ID and access tokens when you\n pass a REFRESH_TOKEN parameter with a valid refresh token as the\n value.

    \n
  • \n
  • \n

    \n USER_SRP_AUTH: Receive secure remote password (SRP) variables for\n the next challenge, PASSWORD_VERIFIER, when you pass\n USERNAME and SRP_A parameters.

    \n
  • \n
  • \n

    \n ADMIN_USER_PASSWORD_AUTH: Receive new tokens or the next\n challenge, for example SOFTWARE_TOKEN_MFA, when you pass\n USERNAME and PASSWORD parameters.

    \n
  • \n
\n

Valid values include the following:

\n
\n
USER_AUTH
\n
\n

The entry point for sign-in with passwords, one-time passwords, biometric\n devices, and security keys.

\n
\n
USER_SRP_AUTH
\n
\n

Username-password authentication with the Secure Remote Password (SRP)\n protocol. For more information, see Use SRP password verification in custom\n authentication flow.

\n
\n
REFRESH_TOKEN_AUTH and REFRESH_TOKEN
\n
\n

Provide a valid refresh token and receive new ID and access tokens. For\n more information, see Using the refresh token.

\n
\n
CUSTOM_AUTH
\n
\n

Custom authentication with Lambda triggers. For more information, see\n Custom authentication challenge Lambda\n triggers.

\n
\n
ADMIN_USER_PASSWORD_AUTH
\n
\n

Username-password authentication with the password sent directly in the\n request. For more information, see Admin authentication flow.

\n
\n
\n

\n USER_PASSWORD_AUTH is a flow type of InitiateAuth and isn't valid for\n AdminInitiateAuth.

", + "smithy.api#documentation": "

The authentication flow that you want to initiate. Each AuthFlow has\n linked AuthParameters that you must submit. The following are some example\n flows and their parameters.

\n
    \n
  • \n

    \n USER_AUTH: Request a preferred authentication type or review\n available authentication types. From the offered authentication types, select\n one in a challenge response and then authenticate with that method in an\n additional challenge response.

    \n
  • \n
  • \n

    \n REFRESH_TOKEN_AUTH: Receive new ID and access tokens when you\n pass a REFRESH_TOKEN parameter with a valid refresh token as the\n value.

    \n
  • \n
  • \n

    \n USER_SRP_AUTH: Receive secure remote password (SRP) variables for\n the next challenge, PASSWORD_VERIFIER, when you pass\n USERNAME and SRP_A parameters.

    \n
  • \n
  • \n

    \n ADMIN_USER_PASSWORD_AUTH: Receive new tokens or the next\n challenge, for example SOFTWARE_TOKEN_MFA, when you pass\n USERNAME and PASSWORD parameters.

    \n
  • \n
\n

\n All flows\n

\n
\n
USER_AUTH
\n
\n

The entry point for sign-in with passwords, one-time passwords, and\n WebAuthn authenticators.

\n
\n
USER_SRP_AUTH
\n
\n

Username-password authentication with the Secure Remote Password (SRP)\n protocol. For more information, see Use SRP password verification in custom\n authentication flow.

\n
\n
REFRESH_TOKEN_AUTH and REFRESH_TOKEN
\n
\n

Provide a valid refresh token and receive new ID and access tokens. For\n more information, see Using the refresh token.

\n
\n
CUSTOM_AUTH
\n
\n

Custom authentication with Lambda triggers. For more information, see\n Custom authentication challenge Lambda\n triggers.

\n
\n
ADMIN_USER_PASSWORD_AUTH
\n
\n

Username-password authentication with the password sent directly in the\n request. For more information, see Admin authentication flow.

\n
\n
\n

\n USER_PASSWORD_AUTH is a flow type of InitiateAuth and isn't valid for\n AdminInitiateAuth.

", "smithy.api#required": {} } }, @@ -2550,25 +2550,25 @@ "ClientMetadata": { "target": "com.amazonaws.cognitoidentityprovider#ClientMetadataType", "traits": { - "smithy.api#documentation": "

A map of custom key-value pairs that you can provide as input for certain custom\n workflows that this action triggers.

\n

You create custom workflows by assigning Lambda functions to user pool triggers.\n When you use the AdminInitiateAuth API action, Amazon Cognito invokes the Lambda functions that\n are specified for various triggers. The ClientMetadata value is passed as input to the\n functions for only the following triggers:

\n
    \n
  • \n

    Pre signup

    \n
  • \n
  • \n

    Pre authentication

    \n
  • \n
  • \n

    User migration

    \n
  • \n
\n

When Amazon Cognito invokes the functions for these triggers, it passes a JSON payload, which\n the function receives as input. This payload contains a validationData\n attribute, which provides the data that you assigned to the ClientMetadata parameter in\n your AdminInitiateAuth request. In your function code in Lambda, you can process the\n validationData value to enhance your workflow for your specific\n needs.

\n

When you use the AdminInitiateAuth API action, Amazon Cognito also invokes the functions for\n the following triggers, but it doesn't provide the ClientMetadata value as input:

\n
    \n
  • \n

    Post authentication

    \n
  • \n
  • \n

    Custom message

    \n
  • \n
  • \n

    Pre token generation

    \n
  • \n
  • \n

    Create auth challenge

    \n
  • \n
  • \n

    Define auth challenge

    \n
  • \n
  • \n

    Custom email sender

    \n
  • \n
  • \n

    Custom SMS sender

    \n
  • \n
\n

For more information, see \nCustomizing user pool Workflows with Lambda Triggers in the Amazon Cognito Developer Guide.

\n \n

When you use the ClientMetadata parameter, remember that Amazon Cognito won't do the\n following:

\n
    \n
  • \n

    Store the ClientMetadata value. This data is available only to Lambda\n triggers that are assigned to a user pool to support custom workflows. If\n your user pool configuration doesn't include triggers, the ClientMetadata\n parameter serves no purpose.

    \n
  • \n
  • \n

    Validate the ClientMetadata value.

    \n
  • \n
  • \n

    Encrypt the ClientMetadata value. Don't use Amazon Cognito to provide sensitive\n information.

    \n
  • \n
\n
" + "smithy.api#documentation": "

A map of custom key-value pairs that you can provide as input for certain custom\n workflows that this action triggers.

\n

You create custom workflows by assigning Lambda functions to user pool triggers.\n When you use the AdminInitiateAuth API action, Amazon Cognito invokes the Lambda functions that\n are specified for various triggers. The ClientMetadata value is passed as input to the\n functions for only the following triggers:

\n
    \n
  • \n

    Pre signup

    \n
  • \n
  • \n

    Pre authentication

    \n
  • \n
  • \n

    User migration

    \n
  • \n
\n

When Amazon Cognito invokes the functions for these triggers, it passes a JSON payload, which\n the function receives as input. This payload contains a validationData\n attribute, which provides the data that you assigned to the ClientMetadata parameter in\n your AdminInitiateAuth request. In your function code in Lambda, you can process the\n validationData value to enhance your workflow for your specific\n needs.

\n

When you use the AdminInitiateAuth API action, Amazon Cognito also invokes the functions for\n the following triggers, but it doesn't provide the ClientMetadata value as input:

\n
    \n
  • \n

    Post authentication

    \n
  • \n
  • \n

    Custom message

    \n
  • \n
  • \n

    Pre token generation

    \n
  • \n
  • \n

    Create auth challenge

    \n
  • \n
  • \n

    Define auth challenge

    \n
  • \n
  • \n

    Custom email sender

    \n
  • \n
  • \n

    Custom SMS sender

    \n
  • \n
\n

For more information, see \nCustomizing user pool Workflows with Lambda Triggers in the Amazon Cognito Developer Guide.

\n \n

When you use the ClientMetadata parameter, note that Amazon Cognito won't do the\n following:

\n
    \n
  • \n

    Store the ClientMetadata value. This data is available only\n to Lambda triggers that are assigned to a user pool to support custom\n workflows. If your user pool configuration doesn't include triggers, the\n ClientMetadata parameter serves no purpose.

    \n
  • \n
  • \n

    Validate the ClientMetadata value.

    \n
  • \n
  • \n

    Encrypt the ClientMetadata value. Don't send sensitive\n information in this parameter.

    \n
  • \n
\n
" } }, "AnalyticsMetadata": { "target": "com.amazonaws.cognitoidentityprovider#AnalyticsMetadataType", "traits": { - "smithy.api#documentation": "

The analytics metadata for collecting Amazon Pinpoint metrics for\n AdminInitiateAuth calls.

" + "smithy.api#documentation": "

The analytics metadata for collecting Amazon Pinpoint metrics.

" } }, "ContextData": { "target": "com.amazonaws.cognitoidentityprovider#ContextDataType", "traits": { - "smithy.api#documentation": "

Contextual data about your user session, such as the device fingerprint, IP address, or location. Amazon Cognito advanced \nsecurity evaluates the risk of an authentication event based on the context that your app generates and passes to Amazon Cognito\nwhen it makes API requests.

" + "smithy.api#documentation": "

Contextual data about your user session, such as the device fingerprint, IP address, or location. Amazon Cognito advanced \nsecurity evaluates the risk of an authentication event based on the context that your app generates and passes to Amazon Cognito\nwhen it makes API requests.

\n

For more information, see Collecting data for threat protection in\napplications.

" } }, "Session": { "target": "com.amazonaws.cognitoidentityprovider#SessionType", "traits": { - "smithy.api#documentation": "

The optional session ID from a ConfirmSignUp API request. You can sign in\n a user directly from the sign-up process with the USER_AUTH authentication\n flow.

" + "smithy.api#documentation": "

The optional session ID from a ConfirmSignUp API request. You can sign in\n a user directly from the sign-up process with an AuthFlow of\n USER_AUTH and AuthParameters of EMAIL_OTP or\n SMS_OTP, depending on how your user pool sent the confirmation-code\n message.

" } } }, @@ -2589,7 +2589,7 @@ "Session": { "target": "com.amazonaws.cognitoidentityprovider#SessionType", "traits": { - "smithy.api#documentation": "

The session that should be passed both ways in challenge-response calls to the\n service. If AdminInitiateAuth or AdminRespondToAuthChallenge\n API call determines that the caller must pass another challenge, they return a session\n with other challenge parameters. This session should be passed as it is to the next\n AdminRespondToAuthChallenge API call.

" + "smithy.api#documentation": "

The session that must be passed to challenge-response requests. If an\n AdminInitiateAuth or AdminRespondToAuthChallenge API\n request determines that the caller must pass another challenge, Amazon Cognito returns a session\n ID and the parameters of the next challenge. Pass this session Id in the\n Session parameter of AdminRespondToAuthChallenge.

" } }, "ChallengeParameters": { @@ -2601,7 +2601,7 @@ "AuthenticationResult": { "target": "com.amazonaws.cognitoidentityprovider#AuthenticationResultType", "traits": { - "smithy.api#documentation": "

The result of the authentication response. This is only returned if the caller doesn't\n need to pass another challenge. If the caller does need to pass another challenge before\n it gets tokens, ChallengeName, ChallengeParameters, and\n Session are returned.

" + "smithy.api#documentation": "

The outcome of successful authentication. This is only returned if the user pool has\n no additional challenges to return. If Amazon Cognito returns another challenge, the response\n includes ChallengeName, ChallengeParameters, and\n Session so that your user can answer the challenge.

" } } }, @@ -2654,7 +2654,7 @@ "UserPoolId": { "target": "com.amazonaws.cognitoidentityprovider#StringType", "traits": { - "smithy.api#documentation": "

The user pool ID for the user pool.

", + "smithy.api#documentation": "

The ID of the user pool where you want to link a federated identity.

", "smithy.api#required": {} } }, @@ -2713,7 +2713,7 @@ } ], "traits": { - "smithy.api#documentation": "

Lists a user's registered devices.

\n \n

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For\n this operation, you must use IAM credentials to authorize requests, and you must\n grant yourself the corresponding IAM permission in a policy.

\n

\n Learn more\n

\n \n
" + "smithy.api#documentation": "

Lists a user's registered devices. Remembered devices are used in authentication\n services where you offer a \"Remember me\" option for users who you want to permit to sign\n in without MFA from a trusted device. Users can bypass MFA while your application\n performs device SRP authentication on the back end. For more information, see Working with devices.

\n \n

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For\n this operation, you must use IAM credentials to authorize requests, and you must\n grant yourself the corresponding IAM permission in a policy.

\n

\n Learn more\n

\n \n
" } }, "com.amazonaws.cognitoidentityprovider#AdminListDevicesRequest": { @@ -2722,7 +2722,7 @@ "UserPoolId": { "target": "com.amazonaws.cognitoidentityprovider#UserPoolIdType", "traits": { - "smithy.api#documentation": "

The user pool ID.

", + "smithy.api#documentation": "

The ID of the user pool where the device owner is a user.

", "smithy.api#required": {} } }, @@ -2736,7 +2736,7 @@ "Limit": { "target": "com.amazonaws.cognitoidentityprovider#QueryLimitType", "traits": { - "smithy.api#documentation": "

The limit of the devices request.

" + "smithy.api#documentation": "

The maximum number of devices that you want Amazon Cognito to return in the response.

" } }, "PaginationToken": { @@ -2757,7 +2757,7 @@ "Devices": { "target": "com.amazonaws.cognitoidentityprovider#DeviceListType", "traits": { - "smithy.api#documentation": "

The devices in the list of devices response.

" + "smithy.api#documentation": "

An array of devices and their information. Each entry that's returned includes\n device information, last-accessed and created dates, and the device key.

" } }, "PaginationToken": { @@ -2801,7 +2801,7 @@ } ], "traits": { - "smithy.api#documentation": "

Lists the groups that a user belongs to.

\n \n

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For\n this operation, you must use IAM credentials to authorize requests, and you must\n grant yourself the corresponding IAM permission in a policy.

\n

\n Learn more\n

\n \n
", + "smithy.api#documentation": "

Lists the groups that a user belongs to. User pool groups are identifiers that you can\n reference from the contents of ID and access tokens, and set preferred IAM roles for\n identity-pool authentication. For more information, see Adding groups to a user pool.

\n \n

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For\n this operation, you must use IAM credentials to authorize requests, and you must\n grant yourself the corresponding IAM permission in a policy.

\n

\n Learn more\n

\n \n
", "smithy.api#paginated": { "inputToken": "NextToken", "outputToken": "NextToken", @@ -2823,20 +2823,20 @@ "UserPoolId": { "target": "com.amazonaws.cognitoidentityprovider#UserPoolIdType", "traits": { - "smithy.api#documentation": "

The user pool ID for the user pool.

", + "smithy.api#documentation": "

The ID of the user pool where you want to view a user's groups.

", "smithy.api#required": {} } }, "Limit": { "target": "com.amazonaws.cognitoidentityprovider#QueryLimitType", "traits": { - "smithy.api#documentation": "

The limit of the request to list groups.

" + "smithy.api#documentation": "

The maximum number of groups that you want Amazon Cognito to return in the response.

" } }, "NextToken": { "target": "com.amazonaws.cognitoidentityprovider#PaginationKey", "traits": { - "smithy.api#documentation": "

An identifier that was returned from the previous call to this operation, which can be\n used to return the next set of items in the list.

" + "smithy.api#documentation": "

This API operation returns a limited number of results. The pagination token is\nan identifier that you can present in an additional API request with the same parameters. When\nyou include the pagination token, Amazon Cognito returns the next set of items after the current list. \nSubsequent requests return a new pagination token. By use of this token, you can paginate \nthrough the full list of items.

" } } }, @@ -2850,13 +2850,13 @@ "Groups": { "target": "com.amazonaws.cognitoidentityprovider#GroupListType", "traits": { - "smithy.api#documentation": "

The groups that the user belongs to.

" + "smithy.api#documentation": "

An array of groups and information about them.

" } }, "NextToken": { "target": "com.amazonaws.cognitoidentityprovider#PaginationKey", "traits": { - "smithy.api#documentation": "

An identifier that was returned from the previous call to this operation, which can be\n used to return the next set of items in the list.

" + "smithy.api#documentation": "

The identifier that Amazon Cognito returned with the previous request to this operation. When \nyou include a pagination token in your request, Amazon Cognito returns the next set of items in \nthe list. By use of this token, you can paginate through the full list of items.

" } } }, @@ -2896,7 +2896,7 @@ } ], "traits": { - "smithy.api#documentation": "

A history of user activity and any risks detected as part of Amazon Cognito advanced\n security.

\n \n

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For\n this operation, you must use IAM credentials to authorize requests, and you must\n grant yourself the corresponding IAM permission in a policy.

\n

\n Learn more\n

\n \n
", + "smithy.api#documentation": "

Requests a history of user activity and any risks detected as part of Amazon Cognito threat\n protection. For more information, see Viewing user event history.

\n \n

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For\n this operation, you must use IAM credentials to authorize requests, and you must\n grant yourself the corresponding IAM permission in a policy.

\n

\n Learn more\n

\n \n
", "smithy.api#paginated": { "inputToken": "NextToken", "outputToken": "NextToken", @@ -2911,7 +2911,7 @@ "UserPoolId": { "target": "com.amazonaws.cognitoidentityprovider#UserPoolIdType", "traits": { - "smithy.api#documentation": "

The user pool ID.

", + "smithy.api#documentation": "

The ID of the user pool that contains the user profile with the logged events.

", "smithy.api#required": {} } }, @@ -2931,7 +2931,7 @@ "NextToken": { "target": "com.amazonaws.cognitoidentityprovider#PaginationKey", "traits": { - "smithy.api#documentation": "

A pagination token.

" + "smithy.api#documentation": "

This API operation returns a limited number of results. The pagination token is\nan identifier that you can present in an additional API request with the same parameters. When\nyou include the pagination token, Amazon Cognito returns the next set of items after the current list. \nSubsequent requests return a new pagination token. By use of this token, you can paginate \nthrough the full list of items.

" } } }, @@ -2951,7 +2951,7 @@ "NextToken": { "target": "com.amazonaws.cognitoidentityprovider#PaginationKey", "traits": { - "smithy.api#documentation": "

A pagination token.

" + "smithy.api#documentation": "

The identifier that Amazon Cognito returned with the previous request to this operation. When \nyou include a pagination token in your request, Amazon Cognito returns the next set of items in \nthe list. By use of this token, you can paginate through the full list of items.

" } } }, @@ -2988,7 +2988,7 @@ } ], "traits": { - "smithy.api#documentation": "

Removes the specified user from the specified group.

\n \n

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For\n this operation, you must use IAM credentials to authorize requests, and you must\n grant yourself the corresponding IAM permission in a policy.

\n

\n Learn more\n

\n \n
" + "smithy.api#documentation": "

Given a username and a group name, removes the user from the group. User pool groups are\n identifiers that you can reference from the contents of ID and access tokens, and set\n preferred IAM roles for identity-pool authentication. For more information, see Adding groups to a user pool.

\n \n

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For\n this operation, you must use IAM credentials to authorize requests, and you must\n grant yourself the corresponding IAM permission in a policy.

\n

\n Learn more\n

\n \n
" } }, "com.amazonaws.cognitoidentityprovider#AdminRemoveUserFromGroupRequest": { @@ -2997,7 +2997,7 @@ "UserPoolId": { "target": "com.amazonaws.cognitoidentityprovider#UserPoolIdType", "traits": { - "smithy.api#documentation": "

The user pool ID for the user pool.

", + "smithy.api#documentation": "

The ID of the user pool that contains the group and the user that you want to\n remove.

", "smithy.api#required": {} } }, @@ -3011,7 +3011,7 @@ "GroupName": { "target": "com.amazonaws.cognitoidentityprovider#GroupNameType", "traits": { - "smithy.api#documentation": "

The group name.

", + "smithy.api#documentation": "

The name of the group that you want to remove the user from, for example\n MyTestGroup.

", "smithy.api#required": {} } } @@ -3070,7 +3070,7 @@ } ], "traits": { - "smithy.api#documentation": "

Resets the specified user's password in a user pool as an administrator. Works on any\n user.

\n

To use this API operation, your user pool must have self-service account recovery\n configured. Use AdminSetUserPassword if you manage passwords as an administrator.

\n \n

This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers\n require you to register an origination phone number before you can send SMS messages\n to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a\n phone number with Amazon Pinpoint.\n Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must\n receive SMS messages might not be able to sign up, activate their accounts, or sign\n in.

\n

If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Services service,\n Amazon Simple Notification Service might place your account in the SMS sandbox. In \n sandbox\n mode\n , you can send messages only to verified phone\n numbers. After you test your app while in the sandbox environment, you can move out\n of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito\n Developer Guide.

\n
\n

Deactivates a user's password, requiring them to change it. If a user tries to sign in\n after the API is called, Amazon Cognito responds with a\n PasswordResetRequiredException error. Your app must then perform the\n actions that reset your user's password: the forgot-password flow. In addition, if the\n user pool has phone verification selected and a verified phone number exists for the\n user, or if email verification is selected and a verified email exists for the user,\n calling this API will also result in sending a message to the end user with the code to\n change their password.

\n \n

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For\n this operation, you must use IAM credentials to authorize requests, and you must\n grant yourself the corresponding IAM permission in a policy.

\n

\n Learn more\n

\n \n
" + "smithy.api#documentation": "

Resets the specified user's password in a user pool. This operation doesn't\n change the user's password, but sends a password-reset code. This operation is the\n administrative authentication API equivalent to ForgotPassword.

\n

This operation deactivates a user's password, requiring them to change it. If a user\n tries to sign in after the API request, Amazon Cognito responds with a\n PasswordResetRequiredException error. Your app must then complete the\n forgot-password flow by prompting the user for their code and a new password, then\n submitting those values in a ConfirmForgotPassword request. In addition, if the user\n pool has phone verification selected and a verified phone number exists for the user, or\n if email verification is selected and a verified email exists for the user, calling this\n API will also result in sending a message to the end user with the code to change their\n password.

\n

To use this API operation, your user pool must have self-service account recovery\n configured. Use AdminSetUserPassword if you manage passwords as an administrator.

\n \n

This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers\n require you to register an origination phone number before you can send SMS messages\n to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a\n phone number with Amazon Pinpoint.\n Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must\n receive SMS messages might not be able to sign up, activate their accounts, or sign\n in.

\n

If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Services service,\n Amazon Simple Notification Service might place your account in the SMS sandbox. In \n sandbox\n mode\n , you can send messages only to verified phone\n numbers. After you test your app while in the sandbox environment, you can move out\n of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito\n Developer Guide.

\n
\n \n

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For\n this operation, you must use IAM credentials to authorize requests, and you must\n grant yourself the corresponding IAM permission in a policy.

\n

\n Learn more\n

\n \n
" } }, "com.amazonaws.cognitoidentityprovider#AdminResetUserPasswordRequest": { @@ -3079,7 +3079,7 @@ "UserPoolId": { "target": "com.amazonaws.cognitoidentityprovider#UserPoolIdType", "traits": { - "smithy.api#documentation": "

The user pool ID for the user pool where you want to reset the user's password.

", + "smithy.api#documentation": "

The ID of the user pool where you want to reset the user's password.

", "smithy.api#required": {} } }, @@ -3093,7 +3093,7 @@ "ClientMetadata": { "target": "com.amazonaws.cognitoidentityprovider#ClientMetadataType", "traits": { - "smithy.api#documentation": "

A map of custom key-value pairs that you can provide as input for any custom workflows\n that this action triggers.

\n

You create custom workflows by assigning Lambda functions to user pool\n triggers. When you use the AdminResetUserPassword API action, Amazon Cognito invokes the function\n that is assigned to the custom message trigger. When Amazon Cognito invokes\n this function, it passes a JSON payload, which the function receives as input. This\n payload contains a clientMetadata attribute, which provides the data that\n you assigned to the ClientMetadata parameter in your AdminResetUserPassword request. In\n your function code in Lambda, you can process the\n clientMetadata value to enhance your workflow for your specific needs.

\n

For more information, see \nCustomizing user pool Workflows with Lambda Triggers in the Amazon Cognito Developer Guide.

\n \n

When you use the ClientMetadata parameter, remember that Amazon Cognito won't do the\n following:

\n
    \n
  • \n

    Store the ClientMetadata value. This data is available only to Lambda\n triggers that are assigned to a user pool to support custom workflows. If\n your user pool configuration doesn't include triggers, the ClientMetadata\n parameter serves no purpose.

    \n
  • \n
  • \n

    Validate the ClientMetadata value.

    \n
  • \n
  • \n

    Encrypt the ClientMetadata value. Don't use Amazon Cognito to provide sensitive\n information.

    \n
  • \n
\n
" + "smithy.api#documentation": "

A map of custom key-value pairs that you can provide as input for any custom workflows\n that this action triggers.

\n

You create custom workflows by assigning Lambda functions to user pool\n triggers. The AdminResetUserPassword API operation invokes the function\n that is assigned to the custom message trigger. When Amazon Cognito invokes\n this function, it passes a JSON payload, which the function receives as input. This\n payload contains a clientMetadata attribute, which provides the data that\n you assigned to the ClientMetadata parameter in your AdminResetUserPassword request. In\n your function code in Lambda, you can process the\n clientMetadata value to enhance your workflow for your specific needs.

\n

For more information, see \nCustomizing user pool Workflows with Lambda Triggers in the Amazon Cognito Developer Guide.

\n \n

When you use the ClientMetadata parameter, note that Amazon Cognito won't do the\n following:

\n
    \n
  • \n

    Store the ClientMetadata value. This data is available only\n to Lambda triggers that are assigned to a user pool to support custom\n workflows. If your user pool configuration doesn't include triggers, the\n ClientMetadata parameter serves no purpose.

    \n
  • \n
  • \n

    Validate the ClientMetadata value.

    \n
  • \n
  • \n

    Encrypt the ClientMetadata value. Don't send sensitive\n information in this parameter.

    \n
  • \n
\n
" } } }, @@ -3196,21 +3196,21 @@ "UserPoolId": { "target": "com.amazonaws.cognitoidentityprovider#UserPoolIdType", "traits": { - "smithy.api#documentation": "

The ID of the Amazon Cognito user pool.

", + "smithy.api#documentation": "

The ID of the user pool where you want to respond to an authentication\n challenge.

", "smithy.api#required": {} } }, "ClientId": { "target": "com.amazonaws.cognitoidentityprovider#ClientIdType", "traits": { - "smithy.api#documentation": "

The app client ID.

", + "smithy.api#documentation": "

The ID of the app client where you initiated sign-in.

", "smithy.api#required": {} } }, "ChallengeName": { "target": "com.amazonaws.cognitoidentityprovider#ChallengeNameType", "traits": { - "smithy.api#documentation": "

The challenge name. For more information, see AdminInitiateAuth.

", + "smithy.api#documentation": "

The name of the challenge that you are responding to. You can find more information\n about values for ChallengeName in the response parameters of AdminInitiateAuth.

", "smithy.api#required": {} } }, @@ -3223,7 +3223,7 @@ "Session": { "target": "com.amazonaws.cognitoidentityprovider#SessionType", "traits": { - "smithy.api#documentation": "

The session that should be passed both ways in challenge-response calls to the\n service. If an InitiateAuth or RespondToAuthChallenge API call\n determines that the caller must pass another challenge, it returns a session with other\n challenge parameters. This session should be passed as it is to the next\n RespondToAuthChallenge API call.

" + "smithy.api#documentation": "

The session identifier that maintains the state of authentication requests and\n challenge responses. If an AdminInitiateAuth or\n AdminRespondToAuthChallenge API request results in a determination that\n your application must pass another challenge, Amazon Cognito returns a session with other\n challenge parameters. Send this session identifier, unmodified, to the next\n AdminRespondToAuthChallenge request.

" } }, "AnalyticsMetadata": { @@ -3235,13 +3235,13 @@ "ContextData": { "target": "com.amazonaws.cognitoidentityprovider#ContextDataType", "traits": { - "smithy.api#documentation": "

Contextual data about your user session, such as the device fingerprint, IP address, or location. Amazon Cognito advanced \nsecurity evaluates the risk of an authentication event based on the context that your app generates and passes to Amazon Cognito\nwhen it makes API requests.

" + "smithy.api#documentation": "

Contextual data about your user session, such as the device fingerprint, IP address, or location. Amazon Cognito advanced \nsecurity evaluates the risk of an authentication event based on the context that your app generates and passes to Amazon Cognito\nwhen it makes API requests.

\n

For more information, see Collecting data for threat protection in\napplications.

" } }, "ClientMetadata": { "target": "com.amazonaws.cognitoidentityprovider#ClientMetadataType", "traits": { - "smithy.api#documentation": "

A map of custom key-value pairs that you can provide as input for any custom workflows\n that this action triggers.

\n

You create custom workflows by assigning Lambda functions to user pool triggers.\n When you use the AdminRespondToAuthChallenge API action, Amazon Cognito invokes any functions\n that you have assigned to the following triggers:

\n
    \n
  • \n

    pre sign-up

    \n
  • \n
  • \n

    custom message

    \n
  • \n
  • \n

    post authentication

    \n
  • \n
  • \n

    user migration

    \n
  • \n
  • \n

    pre token generation

    \n
  • \n
  • \n

    define auth challenge

    \n
  • \n
  • \n

    create auth challenge

    \n
  • \n
  • \n

    verify auth challenge response

    \n
  • \n
\n

When Amazon Cognito invokes any of these functions, it passes a JSON payload, which the\n function receives as input. This payload contains a clientMetadata\n attribute that provides the data that you assigned to the ClientMetadata parameter in\n your AdminRespondToAuthChallenge request. In your function code in Lambda, you can\n process the clientMetadata value to enhance your workflow for your specific\n needs.

\n

For more information, see \nCustomizing user pool Workflows with Lambda Triggers in the Amazon Cognito Developer Guide.

\n \n

When you use the ClientMetadata parameter, remember that Amazon Cognito won't do the\n following:

\n
    \n
  • \n

    Store the ClientMetadata value. This data is available only to Lambda\n triggers that are assigned to a user pool to support custom workflows. If\n your user pool configuration doesn't include triggers, the ClientMetadata\n parameter serves no purpose.

    \n
  • \n
  • \n

    Validate the ClientMetadata value.

    \n
  • \n
  • \n

    Encrypt the ClientMetadata value. Don't use Amazon Cognito to provide sensitive\n information.

    \n
  • \n
\n
" + "smithy.api#documentation": "

A map of custom key-value pairs that you can provide as input for any custom workflows\n that this action triggers.

\n

You create custom workflows by assigning Lambda functions to user pool triggers.\n When you use the AdminRespondToAuthChallenge API action, Amazon Cognito invokes any functions\n that you have assigned to the following triggers:

\n
    \n
  • \n

    Pre sign-up

    \n
  • \n
  • \n

Custom message

    \n
  • \n
  • \n

    Post authentication

    \n
  • \n
  • \n

    User migration

    \n
  • \n
  • \n

    Pre token generation

    \n
  • \n
  • \n

    Define auth challenge

    \n
  • \n
  • \n

    Create auth challenge

    \n
  • \n
  • \n

    Verify auth challenge response

    \n
  • \n
\n

When Amazon Cognito invokes any of these functions, it passes a JSON payload, which the\n function receives as input. This payload contains a clientMetadata\n attribute that provides the data that you assigned to the ClientMetadata parameter in\n your AdminRespondToAuthChallenge request. In your function code in Lambda, you can\n process the clientMetadata value to enhance your workflow for your specific\n needs.

\n

For more information, see \nCustomizing user pool Workflows with Lambda Triggers in the Amazon Cognito Developer Guide.

\n \n

When you use the ClientMetadata parameter, note that Amazon Cognito won't do the\n following:

\n
    \n
  • \n

    Store the ClientMetadata value. This data is available only\n to Lambda triggers that are assigned to a user pool to support custom\n workflows. If your user pool configuration doesn't include triggers, the\n ClientMetadata parameter serves no purpose.

    \n
  • \n
  • \n

    Validate the ClientMetadata value.

    \n
  • \n
  • \n

    Encrypt the ClientMetadata value. Don't send sensitive\n information in this parameter.

    \n
  • \n
\n
" } } }, @@ -3256,25 +3256,25 @@ "ChallengeName": { "target": "com.amazonaws.cognitoidentityprovider#ChallengeNameType", "traits": { - "smithy.api#documentation": "

The name of the challenge. For more information, see AdminInitiateAuth.

" + "smithy.api#documentation": "

The name of the challenge that you must next respond to. You can find more information\n about values for ChallengeName in the response parameters of AdminInitiateAuth.

" } }, "Session": { "target": "com.amazonaws.cognitoidentityprovider#SessionType", "traits": { - "smithy.api#documentation": "

The session that should be passed both ways in challenge-response calls to the\n service. If the caller must pass another challenge, they return a session with other\n challenge parameters. This session should be passed as it is to the next\n RespondToAuthChallenge API call.

" + "smithy.api#documentation": "

The session identifier that maintains the state of authentication requests and\n challenge responses. If an AdminInitiateAuth or\n AdminRespondToAuthChallenge API request results in a determination that\n your application must pass another challenge, Amazon Cognito returns a session with other\n challenge parameters. Send this session identifier, unmodified, to the next\n AdminRespondToAuthChallenge request.

" } }, "ChallengeParameters": { "target": "com.amazonaws.cognitoidentityprovider#ChallengeParametersType", "traits": { - "smithy.api#documentation": "

The challenge parameters. For more information, see AdminInitiateAuth.

" + "smithy.api#documentation": "

The parameters that define your response to the next challenge. Take the values in\n ChallengeParameters and provide values for them in the ChallengeResponses of the next AdminRespondToAuthChallenge\n request.

" } }, "AuthenticationResult": { "target": "com.amazonaws.cognitoidentityprovider#AuthenticationResultType", "traits": { - "smithy.api#documentation": "

The result returned by the server in response to the authentication request.

" + "smithy.api#documentation": "

The outcome of a successful authentication process. After your application has passed\n all challenges, Amazon Cognito returns an AuthenticationResult with the JSON web\n tokens (JWTs) that indicate successful sign-in.

" } } }, @@ -3315,7 +3315,7 @@ } ], "traits": { - "smithy.api#documentation": "

Sets the user's multi-factor authentication (MFA) preference, including which MFA\n options are activated, and if any are preferred. Only one factor can be set as\n preferred. The preferred MFA factor will be used to authenticate a user if multiple\n factors are activated. If multiple options are activated and no preference is set, a\n challenge to choose an MFA option will be returned during sign-in.

\n \n

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For\n this operation, you must use IAM credentials to authorize requests, and you must\n grant yourself the corresponding IAM permission in a policy.

\n

\n Learn more\n

\n \n
" + "smithy.api#documentation": "

Sets the user's multi-factor authentication (MFA) preference, including which MFA\n options are activated, and if any are preferred. Only one factor can be set as\n preferred. The preferred MFA factor will be used to authenticate a user if multiple\n factors are activated. If multiple options are activated and no preference is set, a\n challenge to choose an MFA option will be returned during sign-in.

\n

This operation doesn't reset an existing TOTP MFA for a user. To register a new\n TOTP factor for a user, make an AssociateSoftwareToken request. For more information,\n see TOTP software token MFA.

\n \n

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For\n this operation, you must use IAM credentials to authorize requests, and you must\n grant yourself the corresponding IAM permission in a policy.

\n

\n Learn more\n

\n \n
" } }, "com.amazonaws.cognitoidentityprovider#AdminSetUserMFAPreferenceRequest": { @@ -3400,7 +3400,7 @@ } ], "traits": { - "smithy.api#documentation": "

Sets the specified user's password in a user pool as an administrator. Works on any\n user.

\n

The password can be temporary or permanent. If it is temporary, the user status enters\n the FORCE_CHANGE_PASSWORD state. When the user next tries to sign in, the\n InitiateAuth/AdminInitiateAuth response will contain the\n NEW_PASSWORD_REQUIRED challenge. If the user doesn't sign in before it\n expires, the user won't be able to sign in, and an administrator must reset their\n password.

\n

Once the user has set a new password, or the password is permanent, the user status is\n set to Confirmed.

\n

\n AdminSetUserPassword can set a password for the user profile that Amazon Cognito\n creates for third-party federated users. When you set a password, the federated user's\n status changes from EXTERNAL_PROVIDER to CONFIRMED. A user in\n this state can sign in as a federated user, and initiate authentication flows in the API\n like a linked native user. They can also modify their password and attributes in\n token-authenticated API requests like ChangePassword and\n UpdateUserAttributes. As a best security practice and to keep users in\n sync with your external IdP, don't set passwords on federated user profiles. To set up a\n federated user for native sign-in with a linked native user, refer to Linking federated users to an existing user\n profile.

\n \n

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For\n this operation, you must use IAM credentials to authorize requests, and you must\n grant yourself the corresponding IAM permission in a policy.

\n

\n Learn more\n

\n \n
" + "smithy.api#documentation": "

Sets the specified user's password in a user pool. This operation administratively\n sets a temporary or permanent password for a user. With this operation, you can bypass\n self-service password changes and permit immediate sign-in with the password that you\n set. To do this, set Permanent to true.

\n

You can also set a new temporary password in this request, send it to a user, and\n require them to choose a new password on their next sign-in. To do this, set\n Permanent to false.

\n

If the password is temporary, the user's Status becomes\n FORCE_CHANGE_PASSWORD. When the user next tries to sign in, the\n InitiateAuth or AdminInitiateAuth response includes the\n NEW_PASSWORD_REQUIRED challenge. If the user doesn't sign in\n before the temporary password expires, they can no longer sign in and you must repeat\n this operation to set a temporary or permanent password for them.

\n

After the user sets a new password, or if you set a permanent password, their status\n becomes Confirmed.

\n

\n AdminSetUserPassword can set a password for the user profile that Amazon Cognito\n creates for third-party federated users. When you set a password, the federated user's\n status changes from EXTERNAL_PROVIDER to CONFIRMED. A user in\n this state can sign in as a federated user, and initiate authentication flows in the API\n like a linked native user. They can also modify their password and attributes in\n token-authenticated API requests like ChangePassword and\n UpdateUserAttributes. As a best security practice and to keep users in\n sync with your external IdP, don't set passwords on federated user profiles. To set up a\n federated user for native sign-in with a linked native user, refer to Linking federated users to an existing user\n profile.

\n \n

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For\n this operation, you must use IAM credentials to authorize requests, and you must\n grant yourself the corresponding IAM permission in a policy.

\n

\n Learn more\n

\n \n
" } }, "com.amazonaws.cognitoidentityprovider#AdminSetUserPasswordRequest": { @@ -3409,7 +3409,7 @@ "UserPoolId": { "target": "com.amazonaws.cognitoidentityprovider#UserPoolIdType", "traits": { - "smithy.api#documentation": "

The user pool ID for the user pool where you want to set the user's password.

", + "smithy.api#documentation": "

The ID of the user pool where you want to set the user's password.

", "smithy.api#required": {} } }, @@ -3423,7 +3423,7 @@ "Password": { "target": "com.amazonaws.cognitoidentityprovider#PasswordType", "traits": { - "smithy.api#documentation": "

The password for the user.

", + "smithy.api#documentation": "

The new temporary or permanent password that you want to set for the user. You\n can't remove the password for a user who already has a password so that they can\n only sign in with passwordless methods. In this scenario, you must create a new user\n without a password.

", "smithy.api#required": {} } }, @@ -3431,7 +3431,7 @@ "target": "com.amazonaws.cognitoidentityprovider#BooleanType", "traits": { "smithy.api#default": false, - "smithy.api#documentation": "

\n True if the password is permanent, False if it is\n temporary.

" + "smithy.api#documentation": "

Set to true to set a password that the user can immediately sign in with.\n Set to false to set a temporary password that the user must change on their\n next sign-in.

" } } }, @@ -3545,7 +3545,7 @@ } ], "traits": { - "smithy.api#documentation": "

Provides feedback for an authentication event indicating if it was from a valid user.\n This feedback is used for improving the risk evaluation decision for the user pool as\n part of Amazon Cognito advanced security.

\n \n

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For\n this operation, you must use IAM credentials to authorize requests, and you must\n grant yourself the corresponding IAM permission in a policy.

\n

\n Learn more\n

\n \n
" + "smithy.api#documentation": "

Provides feedback for an authentication event indicating if it was from a valid user.\n This feedback is used for improving the risk evaluation decision for the user pool as\n part of Amazon Cognito threat protection. To train the threat-protection model to recognize\n trusted and untrusted sign-in characteristics, configure threat protection in audit-only\n mode and provide a mechanism for users or administrators to submit feedback. Your\n feedback can tell Amazon Cognito that a risk rating was assigned at a level you don't agree\n with.

\n \n

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For\n this operation, you must use IAM credentials to authorize requests, and you must\n grant yourself the corresponding IAM permission in a policy.

\n

\n Learn more\n

\n \n
" } }, "com.amazonaws.cognitoidentityprovider#AdminUpdateAuthEventFeedbackRequest": { @@ -3554,7 +3554,7 @@ "UserPoolId": { "target": "com.amazonaws.cognitoidentityprovider#UserPoolIdType", "traits": { - "smithy.api#documentation": "

The user pool ID.

", + "smithy.api#documentation": "

The ID of the user pool where you want to submit authentication-event feedback.

", "smithy.api#required": {} } }, @@ -3568,7 +3568,7 @@ "EventId": { "target": "com.amazonaws.cognitoidentityprovider#EventIdType", "traits": { - "smithy.api#documentation": "

The authentication event ID.

", + "smithy.api#documentation": "

The authentication event ID. To query authentication events for a user, see AdminListUserAuthEvents.

", "smithy.api#required": {} } }, @@ -3623,7 +3623,7 @@ } ], "traits": { - "smithy.api#documentation": "

Updates the device status as an administrator.

\n \n

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For\n this operation, you must use IAM credentials to authorize requests, and you must\n grant yourself the corresponding IAM permission in a policy.

\n

\n Learn more\n

\n \n
" + "smithy.api#documentation": "

Updates the status of a user's device so that it is marked as remembered or not\n remembered for the purpose of device authentication. Device authentication is a\n \"remember me\" mechanism that silently completes sign-in from trusted devices with a\n device key instead of a user-provided MFA code. This operation changes the status of a\n device without deleting it, so you can enable it again later. For more information about\n device authentication, see Working with devices.

\n \n

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For\n this operation, you must use IAM credentials to authorize requests, and you must\n grant yourself the corresponding IAM permission in a policy.

\n

\n Learn more\n

\n \n
" } }, "com.amazonaws.cognitoidentityprovider#AdminUpdateDeviceStatusRequest": { @@ -3632,7 +3632,7 @@ "UserPoolId": { "target": "com.amazonaws.cognitoidentityprovider#UserPoolIdType", "traits": { - "smithy.api#documentation": "

The user pool ID.

", + "smithy.api#documentation": "

The ID of the user pool where you want to change a user's device status.

", "smithy.api#required": {} } }, @@ -3646,14 +3646,14 @@ "DeviceKey": { "target": "com.amazonaws.cognitoidentityprovider#DeviceKeyType", "traits": { - "smithy.api#documentation": "

The device key.

", + "smithy.api#documentation": "

The unique identifier, or device key, of the device that you want to update the status\n for.

", "smithy.api#required": {} } }, "DeviceRememberedStatus": { "target": "com.amazonaws.cognitoidentityprovider#DeviceRememberedStatusType", "traits": { - "smithy.api#documentation": "

The status indicating whether a device has been remembered or not.

" + "smithy.api#documentation": "

To enable device authentication with the specified device, set to\n remembered. To disable, set to not_remembered.

" } } }, @@ -3720,7 +3720,7 @@ } ], "traits": { - "smithy.api#documentation": "\n

This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers\n require you to register an origination phone number before you can send SMS messages\n to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a\n phone number with Amazon Pinpoint.\n Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must\n receive SMS messages might not be able to sign up, activate their accounts, or sign\n in.

\n

If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Services service,\n Amazon Simple Notification Service might place your account in the SMS sandbox. In \n sandbox\n mode\n , you can send messages only to verified phone\n numbers. After you test your app while in the sandbox environment, you can move out\n of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito\n Developer Guide.

\n
\n

Updates the specified user's attributes, including developer attributes, as an\n administrator. Works on any user. To delete an attribute from your user, submit the\n attribute in your API request with a blank value.

\n

For custom attributes, you must prepend the custom: prefix to the\n attribute name.

\n

In addition to updating user attributes, this API can also be used to mark phone and\n email as verified.

\n \n

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For\n this operation, you must use IAM credentials to authorize requests, and you must\n grant yourself the corresponding IAM permission in a policy.

\n

\n Learn more\n

\n \n
" + "smithy.api#documentation": "\n

This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers\n require you to register an origination phone number before you can send SMS messages\n to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a\n phone number with Amazon Pinpoint.\n Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must\n receive SMS messages might not be able to sign up, activate their accounts, or sign\n in.

\n

If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Services service,\n Amazon Simple Notification Service might place your account in the SMS sandbox. In \n sandbox\n mode\n , you can send messages only to verified phone\n numbers. After you test your app while in the sandbox environment, you can move out\n of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito\n Developer Guide.

\n
\n

Updates the specified user's attributes. To delete an attribute from your user,\n submit the attribute in your API request with a blank value.

\n

For custom attributes, you must prepend the custom: prefix to the\n attribute name.

\n

This operation can set a user's email address or phone number as verified and\n permit immediate sign-in in user pools that require verification of these attributes. To\n do this, set the email_verified or phone_number_verified\n attribute to true.

\n \n

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For\n this operation, you must use IAM credentials to authorize requests, and you must\n grant yourself the corresponding IAM permission in a policy.

\n

\n Learn more\n

\n \n
" } }, "com.amazonaws.cognitoidentityprovider#AdminUpdateUserAttributesRequest": { @@ -3729,7 +3729,7 @@ "UserPoolId": { "target": "com.amazonaws.cognitoidentityprovider#UserPoolIdType", "traits": { - "smithy.api#documentation": "

The user pool ID for the user pool where you want to update user attributes.

", + "smithy.api#documentation": "

The ID of the user pool where you want to update user attributes.

", "smithy.api#required": {} } }, @@ -3750,7 +3750,7 @@ "ClientMetadata": { "target": "com.amazonaws.cognitoidentityprovider#ClientMetadataType", "traits": { - "smithy.api#documentation": "

A map of custom key-value pairs that you can provide as input for any custom workflows\n that this action triggers.

\n

You create custom workflows by assigning Lambda functions to user pool\n triggers. When you use the AdminUpdateUserAttributes API action, Amazon Cognito invokes the\n function that is assigned to the custom message trigger. When Amazon Cognito\n invokes this function, it passes a JSON payload, which the function receives as input.\n This payload contains a clientMetadata attribute, which provides the data\n that you assigned to the ClientMetadata parameter in your AdminUpdateUserAttributes\n request. In your function code in Lambda, you can process the\n clientMetadata value to enhance your workflow for your specific\n needs.

\n

For more information, see \nCustomizing user pool Workflows with Lambda Triggers in the Amazon Cognito Developer Guide.

\n \n

When you use the ClientMetadata parameter, remember that Amazon Cognito won't do the\n following:

\n
    \n
  • \n

    Store the ClientMetadata value. This data is available only to Lambda\n triggers that are assigned to a user pool to support custom workflows. If\n your user pool configuration doesn't include triggers, the ClientMetadata\n parameter serves no purpose.

    \n
  • \n
  • \n

    Validate the ClientMetadata value.

    \n
  • \n
  • \n

    Encrypt the ClientMetadata value. Don't use Amazon Cognito to provide sensitive\n information.

    \n
  • \n
\n
" + "smithy.api#documentation": "

A map of custom key-value pairs that you can provide as input for any custom workflows\n that this action triggers.

\n

You create custom workflows by assigning Lambda functions to user pool\n triggers. When you use the AdminUpdateUserAttributes API action, Amazon Cognito invokes the\n function that is assigned to the custom message trigger. When Amazon Cognito\n invokes this function, it passes a JSON payload, which the function receives as input.\n This payload contains a clientMetadata attribute, which provides the data\n that you assigned to the ClientMetadata parameter in your AdminUpdateUserAttributes\n request. In your function code in Lambda, you can process the\n clientMetadata value to enhance your workflow for your specific\n needs.

\n

For more information, see \nCustomizing user pool Workflows with Lambda Triggers in the Amazon Cognito Developer Guide.

\n \n

When you use the ClientMetadata parameter, note that Amazon Cognito won't do the\n following:

\n
    \n
  • \n

    Store the ClientMetadata value. This data is available only\n to Lambda triggers that are assigned to a user pool to support custom\n workflows. If your user pool configuration doesn't include triggers, the\n ClientMetadata parameter serves no purpose.

    \n
  • \n
  • \n

    Validate the ClientMetadata value.

    \n
  • \n
  • \n

    Encrypt the ClientMetadata value. Don't send sensitive\n information in this parameter.

    \n
  • \n
\n
" } } }, @@ -3796,7 +3796,7 @@ } ], "traits": { - "smithy.api#documentation": "

Invalidates the identity, access, and refresh tokens that Amazon Cognito issued to a user. Call\n this operation with your administrative credentials when your user signs out of your\n app. This results in the following behavior.

\n
    \n
  • \n

    Amazon Cognito no longer accepts token-authorized user operations\n that you authorize with a signed-out user's access tokens. For more information,\n see Using the Amazon Cognito user pools API and user pool\n endpoints.

    \n

    Amazon Cognito returns an Access Token has been revoked error when your\n app attempts to authorize a user pools API request with a revoked access token\n that contains the scope aws.cognito.signin.user.admin.

    \n
  • \n
  • \n

    Amazon Cognito no longer accepts a signed-out user's ID token in a GetId request to an identity pool with\n ServerSideTokenCheck enabled for its user pool IdP\n configuration in CognitoIdentityProvider.

    \n
  • \n
  • \n

    Amazon Cognito no longer accepts a signed-out user's refresh tokens in refresh\n requests.

    \n
  • \n
\n

Other requests might be valid until your user's token expires.

\n \n

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For\n this operation, you must use IAM credentials to authorize requests, and you must\n grant yourself the corresponding IAM permission in a policy.

\n

\n Learn more\n

\n \n
" + "smithy.api#documentation": "

Invalidates the identity, access, and refresh tokens that Amazon Cognito issued to a user. Call\n this operation with your administrative credentials when your user signs out of your\n app. This results in the following behavior.

\n
    \n
  • \n

    Amazon Cognito no longer accepts token-authorized user operations\n that you authorize with a signed-out user's access tokens. For more information,\n see Using the Amazon Cognito user pools API and user pool\n endpoints.

    \n

    Amazon Cognito returns an Access Token has been revoked error when your\n app attempts to authorize a user pools API request with a revoked access token\n that contains the scope aws.cognito.signin.user.admin.

    \n
  • \n
  • \n

    Amazon Cognito no longer accepts a signed-out user's ID token in a GetId request to an identity pool with\n ServerSideTokenCheck enabled for its user pool IdP\n configuration in CognitoIdentityProvider.

    \n
  • \n
  • \n

    Amazon Cognito no longer accepts a signed-out user's refresh tokens in refresh\n requests.

    \n
  • \n
\n

Other requests might be valid until your user's token expires. This operation\n doesn't clear the managed login session cookie. To clear the session for\n a user who signed in with managed login or the classic hosted UI, direct their browser\n session to the logout endpoint.

\n \n

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For\n this operation, you must use IAM credentials to authorize requests, and you must\n grant yourself the corresponding IAM permission in a policy.

\n

\n Learn more\n

\n \n
" } }, "com.amazonaws.cognitoidentityprovider#AdminUserGlobalSignOutRequest": { @@ -3805,7 +3805,7 @@ "UserPoolId": { "target": "com.amazonaws.cognitoidentityprovider#UserPoolIdType", "traits": { - "smithy.api#documentation": "

The user pool ID.

", + "smithy.api#documentation": "

The ID of the user pool where you want to sign out a user.

", "smithy.api#required": {} } }, @@ -4229,7 +4229,7 @@ ], "traits": { "smithy.api#auth": [], - "smithy.api#documentation": "

Begins setup of time-based one-time password (TOTP) multi-factor authentication (MFA)\n for a user, with a unique private key that Amazon Cognito generates and returns in the API\n response. You can authorize an AssociateSoftwareToken request with either\n the user's access token, or a session string from a challenge response that you received\n from Amazon Cognito.

\n \n

Amazon Cognito disassociates an existing software token when you verify the new token in a\n VerifySoftwareToken API request. If you don't verify the software\n token and your user pool doesn't require MFA, the user can then authenticate with\n user name and password credentials alone. If your user pool requires TOTP MFA, Amazon Cognito\n generates an MFA_SETUP or SOFTWARE_TOKEN_SETUP challenge\n each time your user signs in. Complete setup with\n AssociateSoftwareToken and VerifySoftwareToken.

\n

After you set up software token MFA for your user, Amazon Cognito generates a\n SOFTWARE_TOKEN_MFA challenge when they authenticate. Respond to\n this challenge with your user's TOTP.

\n
\n \n

Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For\n this operation, you can't use IAM credentials to authorize requests, and you can't\n grant IAM permissions in policies. For more information about authorization models in\n Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints.

\n
", + "smithy.api#documentation": "

Begins setup of time-based one-time password (TOTP) multi-factor authentication (MFA)\n for a user, with a unique private key that Amazon Cognito generates and returns in the API\n response. You can authorize an AssociateSoftwareToken request with either\n the user's access token, or a session string from a challenge response that you received\n from Amazon Cognito.

\n \n

Amazon Cognito disassociates an existing software token when you verify the new token in a\n VerifySoftwareToken API request. If you don't verify the software\n token and your user pool doesn't require MFA, the user can then authenticate with\n user name and password credentials alone. If your user pool requires TOTP MFA, Amazon Cognito\n generates an MFA_SETUP or SOFTWARE_TOKEN_SETUP challenge\n each time your user signs in. Complete setup with\n AssociateSoftwareToken and VerifySoftwareToken.

\n

After you set up software token MFA for your user, Amazon Cognito generates a\n SOFTWARE_TOKEN_MFA challenge when they authenticate. Respond to\n this challenge with your user's TOTP.

\n
\n \n

Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For\n this operation, you can't use IAM credentials to authorize requests, and you can't\n grant IAM permissions in policies. For more information about authorization models in\n Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints.

\n
\n

Authorize this action with a signed-in user's access token. It must include the scope aws.cognito.signin.user.admin.

", "smithy.api#optionalAuth": {} } }, @@ -4239,13 +4239,13 @@ "AccessToken": { "target": "com.amazonaws.cognitoidentityprovider#TokenModelType", "traits": { - "smithy.api#documentation": "

A valid access token that Amazon Cognito issued to the user whose software token you want to\n generate.

" + "smithy.api#documentation": "

A valid access token that Amazon Cognito issued to the user whose software token you want to\n generate. You can provide either an access token or a session ID in the request.

" } }, "Session": { "target": "com.amazonaws.cognitoidentityprovider#SessionType", "traits": { - "smithy.api#documentation": "

The session that should be passed both ways in challenge-response calls to the\n service. This allows authentication of the user as part of the MFA setup process.

" + "smithy.api#documentation": "

The session identifier that maintains the state of authentication requests and\n challenge responses. In AssociateSoftwareToken, this is the session ID from\n a successful sign-in. You can provide either an access token or a session ID in the\n request.

" } } }, @@ -4259,13 +4259,13 @@ "SecretCode": { "target": "com.amazonaws.cognitoidentityprovider#SecretCodeType", "traits": { - "smithy.api#documentation": "

A unique generated shared secret code that is used in the TOTP algorithm to generate a\n one-time code.

" + "smithy.api#documentation": "

A unique generated shared secret code that is used by the TOTP algorithm to generate a\n one-time code.

" } }, "Session": { "target": "com.amazonaws.cognitoidentityprovider#SessionType", "traits": { - "smithy.api#documentation": "

The session that should be passed both ways in challenge-response calls to the\n service. This allows authentication of the user as part of the MFA setup process.

" + "smithy.api#documentation": "

The session identifier that maintains the state of authentication requests and\n challenge responses. This session ID is valid for the next request in this flow, VerifySoftwareToken.

" } } }, @@ -4883,7 +4883,7 @@ "ProposedPassword": { "target": "com.amazonaws.cognitoidentityprovider#PasswordType", "traits": { - "smithy.api#documentation": "

The new password.

", + "smithy.api#documentation": "

A new password that you prompted the user to enter in your application.

", "smithy.api#required": {} } }, @@ -5123,7 +5123,7 @@ "AccessToken": { "target": "com.amazonaws.cognitoidentityprovider#TokenModelType", "traits": { - "smithy.api#documentation": "

A valid access token that Amazon Cognito issued to the user whose passkey registration you want\n to verify.

", + "smithy.api#documentation": "

A valid access token that Amazon Cognito issued to the user whose passkey registration you want\n to complete.

", "smithy.api#required": {} } }, @@ -5288,7 +5288,7 @@ ], "traits": { "smithy.api#auth": [], - "smithy.api#documentation": "

Confirms tracking of the device. This API call is the call that begins device\n tracking. For more information about device authentication, see Working with user devices in your user pool.

\n

Authorize this action with a signed-in user's access token. It must include the scope aws.cognito.signin.user.admin.

\n \n

Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For\n this operation, you can't use IAM credentials to authorize requests, and you can't\n grant IAM permissions in policies. For more information about authorization models in\n Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints.

\n
", + "smithy.api#documentation": "

Confirms a device that a user wants to remember. A remembered device is a \"Remember me\n on this device\" option for user pools that perform authentication with the device key of\n a trusted device in the back end, instead of a user-provided MFA code. For more\n information about device authentication, see Working with user devices in your user pool.

\n

Authorize this action with a signed-in user's access token. It must include the scope aws.cognito.signin.user.admin.

\n \n

Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For\n this operation, you can't use IAM credentials to authorize requests, and you can't\n grant IAM permissions in policies. For more information about authorization models in\n Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints.

\n
", "smithy.api#optionalAuth": {} } }, @@ -5305,7 +5305,7 @@ "DeviceKey": { "target": "com.amazonaws.cognitoidentityprovider#DeviceKeyType", "traits": { - "smithy.api#documentation": "

The device key.

", + "smithy.api#documentation": "

The unique identifier, or device key, of the device that you want to update the status\n for.

", "smithy.api#required": {} } }, @@ -5318,12 +5318,12 @@ "DeviceName": { "target": "com.amazonaws.cognitoidentityprovider#DeviceNameType", "traits": { - "smithy.api#documentation": "

The device name.

" + "smithy.api#documentation": "

A friendly name for the device, for example MyMobilePhone.

" } } }, "traits": { - "smithy.api#documentation": "

Confirms the device request.

", + "smithy.api#documentation": "

The confirm-device request.

", "smithy.api#input": {} } }, @@ -5334,12 +5334,12 @@ "target": "com.amazonaws.cognitoidentityprovider#BooleanType", "traits": { "smithy.api#default": false, - "smithy.api#documentation": "

Indicates whether the user confirmation must confirm the device response.

" + "smithy.api#documentation": "

When true, your user must confirm that they want to remember the device.\n Prompt the user for an answer. You must then make an UpdateUserDevice request that sets the device to\n remembered or not_remembered.

\n

When false, immediately sets the device as remembered and eligible for\n device authentication.

\n

You can configure your user pool to always remember devices, in which case this\n response is false, or to allow users to opt in, in which case this response\n is true. Configure this option under Device tracking\n in the Sign-in menu of your user pool. You can also configure this\n option with the DeviceConfiguration parameter of a CreateUserPool or UpdateUserPool request.

" } } }, "traits": { - "smithy.api#documentation": "

Confirms the device response.

", + "smithy.api#documentation": "

The confirm-device response.

", "smithy.api#output": {} } }, @@ -5406,7 +5406,7 @@ ], "traits": { "smithy.api#auth": [], - "smithy.api#documentation": "

Allows a user to enter a confirmation code to reset a forgotten password.

\n \n

Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For\n this operation, you can't use IAM credentials to authorize requests, and you can't\n grant IAM permissions in policies. For more information about authorization models in\n Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints.

\n
", + "smithy.api#documentation": "

This public API operation accepts a confirmation code that Amazon Cognito sent to a user and\n accepts a new password for that user.

\n \n

Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For\n this operation, you can't use IAM credentials to authorize requests, and you can't\n grant IAM permissions in policies. For more information about authorization models in\n Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints.

\n
", "smithy.api#optionalAuth": {} } }, @@ -5416,7 +5416,7 @@ "ClientId": { "target": "com.amazonaws.cognitoidentityprovider#ClientIdType", "traits": { - "smithy.api#documentation": "

The app client ID of the app associated with the user pool.

", + "smithy.api#documentation": "

The ID of the app client where the user wants to reset their password. This parameter\n is an identifier of the client application that users are resetting their password from,\n but this operation resets users' passwords for all app clients in the user\n pool.

", "smithy.api#required": {} } }, @@ -5436,7 +5436,7 @@ "ConfirmationCode": { "target": "com.amazonaws.cognitoidentityprovider#ConfirmationCodeType", "traits": { - "smithy.api#documentation": "

The confirmation code from your user's request to reset their password. For more\n information, see ForgotPassword.

", + "smithy.api#documentation": "

The confirmation code that your user pool sent in response to an AdminResetUserPassword or a ForgotPassword request.

", "smithy.api#required": {} } }, @@ -5456,13 +5456,13 @@ "UserContextData": { "target": "com.amazonaws.cognitoidentityprovider#UserContextDataType", "traits": { - "smithy.api#documentation": "

Contextual data about your user session, such as the device fingerprint, IP address, or location. Amazon Cognito advanced \nsecurity evaluates the risk of an authentication event based on the context that your app generates and passes to Amazon Cognito\nwhen it makes API requests.

" + "smithy.api#documentation": "

Contextual data about your user session, such as the device fingerprint, IP address, or location. Amazon Cognito advanced \nsecurity evaluates the risk of an authentication event based on the context that your app generates and passes to Amazon Cognito\nwhen it makes API requests.

\n

For more information, see Collecting data for threat protection in\napplications.

" } }, "ClientMetadata": { "target": "com.amazonaws.cognitoidentityprovider#ClientMetadataType", "traits": { - "smithy.api#documentation": "

A map of custom key-value pairs that you can provide as input for any custom workflows\n that this action triggers.

\n

You create custom workflows by assigning Lambda functions to user pool triggers.\n When you use the ConfirmForgotPassword API action, Amazon Cognito invokes the function that is\n assigned to the post confirmation trigger. When Amazon Cognito invokes this\n function, it passes a JSON payload, which the function receives as input. This payload\n contains a clientMetadata attribute, which provides the data that you\n assigned to the ClientMetadata parameter in your ConfirmForgotPassword request. In your\n function code in Lambda, you can process the clientMetadata value to\n enhance your workflow for your specific needs.

\n

For more information, see \nCustomizing user pool Workflows with Lambda Triggers in the Amazon Cognito Developer Guide.

\n \n

When you use the ClientMetadata parameter, remember that Amazon Cognito won't do the\n following:

\n
    \n
  • \n

    Store the ClientMetadata value. This data is available only to Lambda\n triggers that are assigned to a user pool to support custom workflows. If\n your user pool configuration doesn't include triggers, the ClientMetadata\n parameter serves no purpose.

    \n
  • \n
  • \n

    Validate the ClientMetadata value.

    \n
  • \n
  • \n

    Encrypt the ClientMetadata value. Don't use Amazon Cognito to provide sensitive\n information.

    \n
  • \n
\n
" + "smithy.api#documentation": "

A map of custom key-value pairs that you can provide as input for any custom workflows\n that this action triggers.

\n

You create custom workflows by assigning Lambda functions to user pool triggers.\n When you use the ConfirmForgotPassword API action, Amazon Cognito invokes the function that is\n assigned to the post confirmation trigger. When Amazon Cognito invokes this\n function, it passes a JSON payload, which the function receives as input. This payload\n contains a clientMetadata attribute, which provides the data that you\n assigned to the ClientMetadata parameter in your ConfirmForgotPassword request. In your\n function code in Lambda, you can process the clientMetadata value to\n enhance your workflow for your specific needs.

\n

For more information, see \nCustomizing user pool Workflows with Lambda Triggers in the Amazon Cognito Developer Guide.

\n \n

When you use the ClientMetadata parameter, note that Amazon Cognito won't do the\n following:

\n
    \n
  • \n

    Store the ClientMetadata value. This data is available only\n to Lambda triggers that are assigned to a user pool to support custom\n workflows. If your user pool configuration doesn't include triggers, the\n ClientMetadata parameter serves no purpose.

    \n
  • \n
  • \n

    Validate the ClientMetadata value.

    \n
  • \n
  • \n

    Encrypt the ClientMetadata value. Don't send sensitive\n information in this parameter.

    \n
  • \n
\n
" } } }, @@ -5536,7 +5536,7 @@ ], "traits": { "smithy.api#auth": [], - "smithy.api#documentation": "

This public API operation provides a code that Amazon Cognito sent to your user when they\n signed up in your user pool via the SignUp\n API operation. After your user enters their code, they confirm ownership of the email\n address or phone number that they provided, and their user account becomes active.\n Depending on your user pool configuration, your users will receive their confirmation\n code in an email or SMS message.

\n

Local users who signed up in your user pool are the only type of user who can confirm\n sign-up with a code. Users who federate through an external identity provider (IdP) have\n already been confirmed by their IdP. Administrator-created users, users created with the\n AdminCreateUser API operation, confirm their accounts when they respond to\n their invitation email message and choose a password. They do not receive a confirmation\n code. Instead, they receive a temporary password.

\n \n

Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For\n this operation, you can't use IAM credentials to authorize requests, and you can't\n grant IAM permissions in policies. For more information about authorization models in\n Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints.

\n
", + "smithy.api#documentation": "

This public API operation submits a code that Amazon Cognito sent to your user when they signed\n up in your user pool via the SignUp\n API operation. After your user enters their code, they confirm ownership of the email\n address or phone number that they provided, and their user account becomes active.\n Depending on your user pool configuration, your users will receive their confirmation\n code in an email or SMS message.

\n

Local users who signed up in your user pool are the only type of user who can confirm\n sign-up with a code. Users who federate through an external identity provider (IdP) have\n already been confirmed by their IdP. Administrator-created users, users created with the\n AdminCreateUser API operation, confirm their accounts when they respond to\n their invitation email message and choose a password. They do not receive a confirmation\n code. Instead, they receive a temporary password.

\n \n

Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For\n this operation, you can't use IAM credentials to authorize requests, and you can't\n grant IAM permissions in policies. For more information about authorization models in\n Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints.

\n
", "smithy.api#optionalAuth": {} } }, @@ -5553,7 +5553,7 @@ "SecretHash": { "target": "com.amazonaws.cognitoidentityprovider#SecretHashType", "traits": { - "smithy.api#documentation": "

A keyed-hash message authentication code (HMAC) calculated using the secret key of a\n user pool client and username plus the client ID in the message.

" + "smithy.api#documentation": "

A keyed-hash message authentication code (HMAC) calculated using the secret key of a\n user pool client and username plus the client ID in the message. For more information\n about SecretHash, see Computing secret hash values.

" } }, "Username": { @@ -5566,7 +5566,7 @@ "ConfirmationCode": { "target": "com.amazonaws.cognitoidentityprovider#ConfirmationCodeType", "traits": { - "smithy.api#documentation": "

The confirmation code sent by a user's request to confirm registration.

", + "smithy.api#documentation": "

The confirmation code that your user pool sent in response to the SignUp\n request.

", "smithy.api#required": {} } }, @@ -5574,7 +5574,7 @@ "target": "com.amazonaws.cognitoidentityprovider#ForceAliasCreation", "traits": { "smithy.api#default": false, - "smithy.api#documentation": "

Boolean to be specified to force user confirmation irrespective of existing alias. By\n default set to False. If this parameter is set to True and the\n phone number/email used for sign up confirmation already exists as an alias with a\n different user, the API call will migrate the alias from the previous user to the newly\n created user being confirmed. If set to False, the API will throw an\n AliasExistsException error.

" + "smithy.api#documentation": "

When true, forces user confirmation despite any existing aliases.\n Defaults to false. A value of true migrates the alias from an\n existing user to the new user if an existing user already has the phone number or email\n address as an alias.

\n

Say, for example, that an existing user has an email attribute of\n bob@example.com and email is an alias in your user pool. If the new\n user also has an email of bob@example.com and your\n ConfirmSignUp response sets ForceAliasCreation to\n true, the new user can sign in with a username of\n bob@example.com and the existing user can no longer do so.

\n

If false and an attribute belongs to an existing alias, this request\n returns an AliasExistsException error.

\n

For more information about sign-in aliases, see Customizing sign-in attributes.

" } }, "AnalyticsMetadata": { @@ -5586,13 +5586,13 @@ "UserContextData": { "target": "com.amazonaws.cognitoidentityprovider#UserContextDataType", "traits": { - "smithy.api#documentation": "

Contextual data about your user session, such as the device fingerprint, IP address, or location. Amazon Cognito advanced \nsecurity evaluates the risk of an authentication event based on the context that your app generates and passes to Amazon Cognito\nwhen it makes API requests.

" + "smithy.api#documentation": "

Contextual data about your user session, such as the device fingerprint, IP address, or location. Amazon Cognito advanced \nsecurity evaluates the risk of an authentication event based on the context that your app generates and passes to Amazon Cognito\nwhen it makes API requests.

\n

For more information, see Collecting data for threat protection in\napplications.

" } }, "ClientMetadata": { "target": "com.amazonaws.cognitoidentityprovider#ClientMetadataType", "traits": { - "smithy.api#documentation": "

A map of custom key-value pairs that you can provide as input for any custom workflows\n that this action triggers.

\n

You create custom workflows by assigning Lambda functions to user pool\n triggers. When you use the ConfirmSignUp API action, Amazon Cognito invokes the function that is\n assigned to the post confirmation trigger. When Amazon Cognito invokes this\n function, it passes a JSON payload, which the function receives as input. This payload\n contains a clientMetadata attribute, which provides the data that you\n assigned to the ClientMetadata parameter in your ConfirmSignUp request. In your function\n code in Lambda, you can process the clientMetadata value to\n enhance your workflow for your specific needs.

\n

For more information, see \nCustomizing user pool Workflows with Lambda Triggers in the Amazon Cognito Developer Guide.

\n \n

When you use the ClientMetadata parameter, remember that Amazon Cognito won't do the\n following:

\n
    \n
  • \n

    Store the ClientMetadata value. This data is available only to Lambda\n triggers that are assigned to a user pool to support custom workflows. If\n your user pool configuration doesn't include triggers, the ClientMetadata\n parameter serves no purpose.

    \n
  • \n
  • \n

    Validate the ClientMetadata value.

    \n
  • \n
  • \n

    Encrypt the ClientMetadata value. Don't use Amazon Cognito to provide sensitive\n information.

    \n
  • \n
\n
" + "smithy.api#documentation": "

A map of custom key-value pairs that you can provide as input for any custom workflows\n that this action triggers.

\n

You create custom workflows by assigning Lambda functions to user pool\n triggers. When you use the ConfirmSignUp API action, Amazon Cognito invokes the function that is\n assigned to the post confirmation trigger. When Amazon Cognito invokes this\n function, it passes a JSON payload, which the function receives as input. This payload\n contains a clientMetadata attribute, which provides the data that you\n assigned to the ClientMetadata parameter in your ConfirmSignUp request. In your function\n code in Lambda, you can process the clientMetadata value to\n enhance your workflow for your specific needs.

\n

For more information, see \nCustomizing user pool Workflows with Lambda Triggers in the Amazon Cognito Developer Guide.

\n \n

When you use the ClientMetadata parameter, note that Amazon Cognito won't do the\n following:

\n
    \n
  • \n

    Store the ClientMetadata value. This data is available only\n to Lambda triggers that are assigned to a user pool to support custom\n workflows. If your user pool configuration doesn't include triggers, the\n ClientMetadata parameter serves no purpose.

    \n
  • \n
  • \n

    Validate the ClientMetadata value.

    \n
  • \n
  • \n

    Encrypt the ClientMetadata value. Don't send sensitive\n information in this parameter.

    \n
  • \n
\n
" } }, "Session": { @@ -5613,7 +5613,7 @@ "Session": { "target": "com.amazonaws.cognitoidentityprovider#SessionType", "traits": { - "smithy.api#documentation": "

You can automatically sign users in with the one-time password that they provided in a\n successful ConfirmSignUp request. To do this, pass the Session\n parameter from the ConfirmSignUp response in the Session\n parameter of an InitiateAuth or AdminInitiateAuth request.

" + "smithy.api#documentation": "

A session identifier that you can use to immediately sign in the confirmed user. You\n can automatically sign users in with the one-time password that they provided in a\n successful ConfirmSignUp request. To do this, pass the Session\n parameter from this response in the Session parameter of an InitiateAuth or AdminInitiateAuth request.

" } } }, @@ -5706,7 +5706,7 @@ } ], "traits": { - "smithy.api#documentation": "

Creates a new group in the specified user pool.

\n \n

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For\n this operation, you must use IAM credentials to authorize requests, and you must\n grant yourself the corresponding IAM permission in a policy.

\n

\n Learn more\n

\n \n
" + "smithy.api#documentation": "

Creates a new group in the specified user pool. For more information about user pool\n groups, see Adding groups to a user pool.

\n \n

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For\n this operation, you must use IAM credentials to authorize requests, and you must\n grant yourself the corresponding IAM permission in a policy.

\n

\n Learn more\n

\n \n
" } }, "com.amazonaws.cognitoidentityprovider#CreateGroupRequest": { @@ -5715,27 +5715,27 @@ "GroupName": { "target": "com.amazonaws.cognitoidentityprovider#GroupNameType", "traits": { - "smithy.api#documentation": "

The name of the group. Must be unique.

", + "smithy.api#documentation": "

A name for the group. This name must be unique in your user pool.

", "smithy.api#required": {} } }, "UserPoolId": { "target": "com.amazonaws.cognitoidentityprovider#UserPoolIdType", "traits": { - "smithy.api#documentation": "

The user pool ID for the user pool.

", + "smithy.api#documentation": "

The ID of the user pool where you want to create a user group.

", "smithy.api#required": {} } }, "Description": { "target": "com.amazonaws.cognitoidentityprovider#DescriptionType", "traits": { - "smithy.api#documentation": "

A string containing the description of the group.

" + "smithy.api#documentation": "

A description of the group that you're creating.

" } }, "RoleArn": { "target": "com.amazonaws.cognitoidentityprovider#ArnType", "traits": { - "smithy.api#documentation": "

The role Amazon Resource Name (ARN) for the group.

" + "smithy.api#documentation": "

The Amazon Resource Name (ARN) for the IAM role that you want to associate with the\n group. A group role primarily declares a preferred role for the credentials that you get\n from an identity pool. Amazon Cognito ID tokens have a cognito:preferred_role claim\n that presents the highest-precedence group that a user belongs to. Both ID and access\n tokens also contain a cognito:groups claim that list all the groups that a\n user is a member of.

" } }, "Precedence": { @@ -5755,7 +5755,7 @@ "Group": { "target": "com.amazonaws.cognitoidentityprovider#GroupType", "traits": { - "smithy.api#documentation": "

The group object for the group.

" + "smithy.api#documentation": "

The response object for a created group.

" } } }, @@ -5795,7 +5795,7 @@ } ], "traits": { - "smithy.api#documentation": "

Adds a configuration and trust relationship between a third-party identity provider\n (IdP) and a user pool.

\n \n

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For\n this operation, you must use IAM credentials to authorize requests, and you must\n grant yourself the corresponding IAM permission in a policy.

\n

\n Learn more\n

\n \n
" + "smithy.api#documentation": "

Adds a configuration and trust relationship between a third-party identity provider\n (IdP) and a user pool. Amazon Cognito accepts sign-in with third-party identity providers through\n managed login and OIDC relying-party libraries. For more information, see Third-party IdP sign-in.

\n \n

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For\n this operation, you must use IAM credentials to authorize requests, and you must\n grant yourself the corresponding IAM permission in a policy.

\n

\n Learn more\n

\n \n
" } }, "com.amazonaws.cognitoidentityprovider#CreateIdentityProviderRequest": { @@ -5804,21 +5804,21 @@ "UserPoolId": { "target": "com.amazonaws.cognitoidentityprovider#UserPoolIdType", "traits": { - "smithy.api#documentation": "

The user pool ID.

", + "smithy.api#documentation": "

The ID of the user pool where you want to create an IdP.

", "smithy.api#required": {} } }, "ProviderName": { "target": "com.amazonaws.cognitoidentityprovider#ProviderNameTypeV2", "traits": { - "smithy.api#documentation": "

The IdP name.

", + "smithy.api#documentation": "

The name that you want to assign to the IdP. You can pass the identity provider name\n in the identity_provider query parameter of requests to the Authorize endpoint to silently redirect to sign-in with the associated\n IdP.

", "smithy.api#required": {} } }, "ProviderType": { "target": "com.amazonaws.cognitoidentityprovider#IdentityProviderTypeType", "traits": { - "smithy.api#documentation": "

The IdP type.

", + "smithy.api#documentation": "

The type of IdP that you want to add. Amazon Cognito supports OIDC, SAML 2.0, Login With\n Amazon, Sign In With Apple, Google, and Facebook IdPs.

", "smithy.api#required": {} } }, @@ -5832,13 +5832,13 @@ "AttributeMapping": { "target": "com.amazonaws.cognitoidentityprovider#AttributeMappingType", "traits": { - "smithy.api#documentation": "

A mapping of IdP attributes to standard and custom user pool attributes.

" + "smithy.api#documentation": "

A mapping of IdP attributes to standard and custom user pool attributes. Specify a\n user pool attribute as the key of the key-value pair, and the IdP attribute claim name\n as the value.

" } }, "IdpIdentifiers": { "target": "com.amazonaws.cognitoidentityprovider#IdpIdentifiersListType", "traits": { - "smithy.api#documentation": "

A list of IdP identifiers.

" + "smithy.api#documentation": "

An array of IdP identifiers, for example \"IdPIdentifiers\": [ \"MyIdP\", \"MyIdP2\"\n ]. Identifiers are friendly names that you can pass in the\n idp_identifier query parameter of requests to the Authorize endpoint to silently redirect to sign-in with the associated IdP.\n Identifiers in a domain format also enable the use of email-address matching with SAML providers.

" } } }, @@ -5852,7 +5852,7 @@ "IdentityProvider": { "target": "com.amazonaws.cognitoidentityprovider#IdentityProviderType", "traits": { - "smithy.api#documentation": "

The newly created IdP object.

", + "smithy.api#documentation": "

The details of the new user pool IdP.

", "smithy.api#required": {} } } @@ -5896,7 +5896,7 @@ } ], "traits": { - "smithy.api#documentation": "

Creates a new set of branding settings for a user pool style and associates it with an\n app client. This operation is the programmatic option for the creation of a new style in\n the branding designer.

\n

Provides values for UI customization in a Settings JSON object and image\n files in an Assets array. To send the JSON object Document\n type parameter in Settings, you might need to update to the most recent\n version of your Amazon Web Services SDK.

\n

This operation has a 2-megabyte request-size limit and include the CSS settings and\n image assets for your app client. Your branding settings might exceed 2MB in size. Amazon Cognito\n doesn't require that you pass all parameters in one request and preserves existing\n style settings that you don't specify. If your request is larger than 2MB, separate it\n into multiple requests, each with a size smaller than the limit.

\n

For more information, see API and SDK operations for managed login branding\n

\n \n

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For\n this operation, you must use IAM credentials to authorize requests, and you must\n grant yourself the corresponding IAM permission in a policy.

\n

\n Learn more\n

\n \n
" + "smithy.api#documentation": "

Creates a new set of branding settings for a user pool style and associates it with an\n app client. This operation is the programmatic option for the creation of a new style in\n the branding designer.

\n

Provides values for UI customization in a Settings JSON object and image\n files in an Assets array. To send the JSON object Document\n type parameter in Settings, you might need to update to the most recent\n version of your Amazon Web Services SDK. To create a new style with default settings, set\n UseCognitoProvidedValues to true and don't provide\n values for any other options.

\n

This operation has a 2-megabyte request-size limit and includes the CSS settings and\n image assets for your app client. Your branding settings might exceed 2MB in size. Amazon Cognito\n doesn't require that you pass all parameters in one request and preserves existing\n style settings that you don't specify. If your request is larger than 2MB, separate it\n into multiple requests, each with a size smaller than the limit.

\n

As a best practice, modify the output of DescribeManagedLoginBrandingByClient into the request parameters for this\n operation. To get all settings, set ReturnMergedResources to\n true. For more information, see API and SDK operations for managed login branding.

\n \n

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For\n this operation, you must use IAM credentials to authorize requests, and you must\n grant yourself the corresponding IAM permission in a policy.

\n

\n Learn more\n

\n \n
" } }, "com.amazonaws.cognitoidentityprovider#CreateManagedLoginBrandingRequest": { @@ -5920,7 +5920,7 @@ "target": "com.amazonaws.cognitoidentityprovider#BooleanType", "traits": { "smithy.api#default": false, - "smithy.api#documentation": "

When true, applies the default branding style options. This option reverts to default\n style options that are managed by Amazon Cognito. You can modify them later in the branding\n designer.

\n

When you specify true for this option, you must also omit values for\n Settings and Assets in the request.

" + "smithy.api#documentation": "

When true, applies the default branding style options. These default options are\n managed by Amazon Cognito. You can modify them later in the branding designer.

\n

When you specify true for this option, you must also omit values for\n Settings and Assets in the request.

" } }, "Settings": { @@ -5983,7 +5983,7 @@ } ], "traits": { - "smithy.api#documentation": "

Creates a new OAuth2.0 resource server and defines custom scopes within it.

\n \n

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For\n this operation, you must use IAM credentials to authorize requests, and you must\n grant yourself the corresponding IAM permission in a policy.

\n

\n Learn more\n

\n \n
" + "smithy.api#documentation": "

Creates a new OAuth2.0 resource server and defines custom scopes within it. Resource\n servers are associated with custom scopes and machine-to-machine (M2M) authorization.\n For more information, see Access control with resource servers.

\n \n

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For\n this operation, you must use IAM credentials to authorize requests, and you must\n grant yourself the corresponding IAM permission in a policy.

\n

\n Learn more\n

\n \n
" } }, "com.amazonaws.cognitoidentityprovider#CreateResourceServerRequest": { @@ -5992,7 +5992,7 @@ "UserPoolId": { "target": "com.amazonaws.cognitoidentityprovider#UserPoolIdType", "traits": { - "smithy.api#documentation": "

The user pool ID for the user pool.

", + "smithy.api#documentation": "

The ID of the user pool where you want to create a resource server.

", "smithy.api#required": {} } }, @@ -6013,7 +6013,7 @@ "Scopes": { "target": "com.amazonaws.cognitoidentityprovider#ResourceServerScopeListType", "traits": { - "smithy.api#documentation": "

A list of scopes. Each scope is a key-value map with the keys name and\n description.

" + "smithy.api#documentation": "

A list of custom scopes. Each scope is a key-value map with the keys\n ScopeName and ScopeDescription. The name of a custom scope\n is a combination of ScopeName and the resource server Name in\n this request, for example MyResourceServerName/MyScopeName.

" } } }, @@ -6027,7 +6027,7 @@ "ResourceServer": { "target": "com.amazonaws.cognitoidentityprovider#ResourceServerType", "traits": { - "smithy.api#documentation": "

The newly created resource server.

", + "smithy.api#documentation": "

The details of the new resource server.

", "smithy.api#required": {} } } @@ -6068,7 +6068,7 @@ } ], "traits": { - "smithy.api#documentation": "

Creates a user import job.

\n \n

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For\n this operation, you must use IAM credentials to authorize requests, and you must\n grant yourself the corresponding IAM permission in a policy.

\n

\n Learn more\n

\n \n
" + "smithy.api#documentation": "

Creates a user import job. You can import users into user pools from a comma-separated\n values (CSV) file without adding Amazon Cognito MAU costs to your Amazon Web Services bill. To generate a\n template for your import, see GetCSVHeader. To learn more about CSV import, see\n Importing users from a CSV file.

\n \n

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For\n this operation, you must use IAM credentials to authorize requests, and you must\n grant yourself the corresponding IAM permission in a policy.

\n

\n Learn more\n

\n \n
" } }, "com.amazonaws.cognitoidentityprovider#CreateUserImportJobRequest": { @@ -6077,21 +6077,21 @@ "JobName": { "target": "com.amazonaws.cognitoidentityprovider#UserImportJobNameType", "traits": { - "smithy.api#documentation": "

The job name for the user import job.

", + "smithy.api#documentation": "

A friendly name for the user import job.

", "smithy.api#required": {} } }, "UserPoolId": { "target": "com.amazonaws.cognitoidentityprovider#UserPoolIdType", "traits": { - "smithy.api#documentation": "

The user pool ID for the user pool that the users are being imported into.

", + "smithy.api#documentation": "

The ID of the user pool that you want to import users into.

", "smithy.api#required": {} } }, "CloudWatchLogsRoleArn": { "target": "com.amazonaws.cognitoidentityprovider#ArnType", "traits": { - "smithy.api#documentation": "

The role ARN for the Amazon CloudWatch Logs Logging role for the user import job.

", + "smithy.api#documentation": "

You must specify an IAM role that has permission to log import-job results to\n Amazon CloudWatch Logs. This parameter is the ARN of that role.

", "smithy.api#required": {} } } @@ -6107,7 +6107,7 @@ "UserImportJob": { "target": "com.amazonaws.cognitoidentityprovider#UserImportJobType", "traits": { - "smithy.api#documentation": "

The job object that represents the user import job.

" + "smithy.api#documentation": "

The details of the user import job.

" } } }, @@ -6160,7 +6160,7 @@ } ], "traits": { - "smithy.api#documentation": "\n

This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers\n require you to register an origination phone number before you can send SMS messages\n to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a\n phone number with Amazon Pinpoint.\n Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must\n receive SMS messages might not be able to sign up, activate their accounts, or sign\n in.

\n

If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Services service,\n Amazon Simple Notification Service might place your account in the SMS sandbox. In \n sandbox\n mode\n , you can send messages only to verified phone\n numbers. After you test your app while in the sandbox environment, you can move out\n of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito\n Developer Guide.

\n
\n

Creates a new Amazon Cognito user pool and sets the password policy for the\n pool.

\n \n

If you don't provide a value for an attribute, Amazon Cognito sets it to its default value.

\n
\n \n

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For\n this operation, you must use IAM credentials to authorize requests, and you must\n grant yourself the corresponding IAM permission in a policy.

\n

\n Learn more\n

\n \n
", + "smithy.api#documentation": "\n

This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers\n require you to register an origination phone number before you can send SMS messages\n to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a\n phone number with Amazon Pinpoint.\n Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must\n receive SMS messages might not be able to sign up, activate their accounts, or sign\n in.

\n

If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Services service,\n Amazon Simple Notification Service might place your account in the SMS sandbox. In \n sandbox\n mode\n , you can send messages only to verified phone\n numbers. After you test your app while in the sandbox environment, you can move out\n of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito\n Developer Guide.

\n
\n

Creates a new Amazon Cognito user pool. This operation sets basic and advanced configuration\n options. You can create a user pool in the Amazon Cognito console to your preferences and use the\n output of DescribeUserPool to generate requests from that\n baseline.

\n \n

If you don't provide a value for an attribute, Amazon Cognito sets it to its default value.

\n
\n \n

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For\n this operation, you must use IAM credentials to authorize requests, and you must\n grant yourself the corresponding IAM permission in a policy.

\n

\n Learn more\n

\n \n
", "smithy.api#examples": [ { "title": "Example user pool with email and username sign-in", @@ -6650,7 +6650,7 @@ } ], "traits": { - "smithy.api#documentation": "

Creates the user pool client.

\n

When you create a new user pool client, token revocation is automatically activated.\n For more information about revoking tokens, see RevokeToken.

\n \n

If you don't provide a value for an attribute, Amazon Cognito sets it to its default value.

\n
\n \n

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For\n this operation, you must use IAM credentials to authorize requests, and you must\n grant yourself the corresponding IAM permission in a policy.

\n

\n Learn more\n

\n \n
", + "smithy.api#documentation": "

Creates an app client in a user pool. This operation sets basic and advanced\n configuration options. You can create an app client in the Amazon Cognito console to your\n preferences and use the output of DescribeUserPoolClient to generate requests from that\n baseline.

\n

New app clients activate token revocation by default. For more information about\n revoking tokens, see RevokeToken.

\n \n

If you don't provide a value for an attribute, Amazon Cognito sets it to its default value.

\n
\n \n

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For\n this operation, you must use IAM credentials to authorize requests, and you must\n grant yourself the corresponding IAM permission in a policy.

\n

\n Learn more\n

\n \n
", "smithy.api#examples": [ { "title": "Example user pool app client with email and username sign-in", @@ -6783,14 +6783,14 @@ "UserPoolId": { "target": "com.amazonaws.cognitoidentityprovider#UserPoolIdType", "traits": { - "smithy.api#documentation": "

The user pool ID for the user pool where you want to create a user pool client.

", + "smithy.api#documentation": "

The ID of the user pool where you want to create an app client.

", "smithy.api#required": {} } }, "ClientName": { "target": "com.amazonaws.cognitoidentityprovider#ClientNameType", "traits": { - "smithy.api#documentation": "

The client name for the user pool client you would like to create.

", + "smithy.api#documentation": "

A friendly name for the app client that you want to create.

", "smithy.api#required": {} } }, @@ -6798,7 +6798,7 @@ "target": "com.amazonaws.cognitoidentityprovider#GenerateSecret", "traits": { "smithy.api#default": false, - "smithy.api#documentation": "

Boolean to specify whether you want to generate a secret for the user pool client\n being created.

" + "smithy.api#documentation": "

When true, generates a client secret for the app client. Client secrets\n are used with server-side and machine-to-machine applications. For more information, see\n App client types.

" } }, "RefreshTokenValidity": { @@ -6823,7 +6823,7 @@ "TokenValidityUnits": { "target": "com.amazonaws.cognitoidentityprovider#TokenValidityUnitsType", "traits": { - "smithy.api#documentation": "

The units in which the validity times are represented. The default unit for\n RefreshToken is days, and default for ID and access tokens are hours.

" + "smithy.api#documentation": "

The units that validity times are represented in. The default unit for refresh tokens\n is days, and the default for ID and access tokens are hours.

" } }, "ReadAttributes": { @@ -6847,25 +6847,25 @@ "SupportedIdentityProviders": { "target": "com.amazonaws.cognitoidentityprovider#SupportedIdentityProvidersListType", "traits": { - "smithy.api#documentation": "

A list of provider names for the identity providers (IdPs) that are supported on this\n client. The following are supported: COGNITO, Facebook,\n Google, SignInWithApple, and LoginWithAmazon.\n You can also specify the names that you configured for the SAML and OIDC IdPs in your\n user pool, for example MySAMLIdP or MyOIDCIdP.

\n

This setting applies to providers that you can access with the hosted\n UI and OAuth 2.0 authorization server. The removal of COGNITO\n from this list doesn't prevent authentication operations for local users with the\n user pools API in an Amazon Web Services SDK. The only way to prevent API-based authentication is to\n block access with a WAF rule.

" + "smithy.api#documentation": "

A list of provider names for the identity providers (IdPs) that are supported on this\n client. The following are supported: COGNITO, Facebook,\n Google, SignInWithApple, and LoginWithAmazon.\n You can also specify the names that you configured for the SAML and OIDC IdPs in your\n user pool, for example MySAMLIdP or MyOIDCIdP.

\n

This setting applies to providers that you can access with managed \n login. The removal of COGNITO\n from this list doesn't prevent authentication operations for local users with the\n user pools API in an Amazon Web Services SDK. The only way to prevent API-based authentication is to\n block access with a WAF rule.

" } }, "CallbackURLs": { "target": "com.amazonaws.cognitoidentityprovider#CallbackURLsListType", "traits": { - "smithy.api#documentation": "

A list of allowed redirect (callback) URLs for the IdPs.

\n

A redirect URI must:

\n
    \n
  • \n

    Be an absolute URI.

    \n
  • \n
  • \n

    Be registered with the authorization server.

    \n
  • \n
  • \n

    Not include a fragment component.

    \n
  • \n
\n

See OAuth 2.0 -\n Redirection Endpoint.

\n

Amazon Cognito requires HTTPS over HTTP except for http://localhost for testing purposes\n only.

\n

App callback URLs such as myapp://example are also supported.

" + "smithy.api#documentation": "

A list of allowed redirect (callback) URLs for the IdPs.

\n

A redirect URI must:

\n
    \n
  • \n

    Be an absolute URI.

    \n
  • \n
  • \n

    Be registered with the authorization server. Amazon Cognito doesn't accept\n authorization requests with redirect_uri values that aren't in\n the list of CallbackURLs that you provide in this parameter.

    \n
  • \n
  • \n

    Not include a fragment component.

    \n
  • \n
\n

See OAuth 2.0 -\n Redirection Endpoint.

\n

Amazon Cognito requires HTTPS over HTTP except for http://localhost for testing purposes\n only.

\n

App callback URLs such as myapp://example are also supported.

" } }, "LogoutURLs": { "target": "com.amazonaws.cognitoidentityprovider#LogoutURLsListType", "traits": { - "smithy.api#documentation": "

A list of allowed logout URLs for the IdPs.

" + "smithy.api#documentation": "

A list of allowed logout URLs for managed login authentication. For more information,\n see Logout endpoint.

" } }, "DefaultRedirectURI": { "target": "com.amazonaws.cognitoidentityprovider#RedirectUrlType", "traits": { - "smithy.api#documentation": "

The default redirect URI. In app clients with one assigned IdP, replaces\n redirect_uri in authentication requests. Must be in the\n CallbackURLs list.

\n

A redirect URI must:

\n
    \n
  • \n

    Be an absolute URI.

    \n
  • \n
  • \n

    Be registered with the authorization server.

    \n
  • \n
  • \n

    Not include a fragment component.

    \n
  • \n
\n

For more information, see Default redirect URI.

\n

Amazon Cognito requires HTTPS over HTTP except for http://localhost for testing purposes\n only.

\n

App callback URLs such as myapp://example are also supported.

" + "smithy.api#documentation": "

The default redirect URI. In app clients with one assigned IdP, replaces\n redirect_uri in authentication requests. Must be in the\n CallbackURLs list.

" } }, "AllowedOAuthFlows": { @@ -6877,7 +6877,7 @@ "AllowedOAuthScopes": { "target": "com.amazonaws.cognitoidentityprovider#ScopeListType", "traits": { - "smithy.api#documentation": "

The allowed OAuth scopes. Possible values provided by OAuth are phone,\n email, openid, and profile. Possible values\n provided by Amazon Web Services are aws.cognito.signin.user.admin. Custom\n scopes created in Resource Servers are also supported.

" + "smithy.api#documentation": "

The OAuth 2.0 scopes that you want to permit your app client to authorize. Scopes\n govern access control to user pool self-service API operations, user data from the\n userInfo endpoint, and third-party APIs. Possible values provided by\n OAuth are phone, email, openid, and\n profile. Possible values provided by Amazon Web Services are\n aws.cognito.signin.user.admin. Custom scopes created in Resource\n Servers are also supported.

" } }, "AllowedOAuthFlowsUserPoolClient": { @@ -6890,7 +6890,7 @@ "AnalyticsConfiguration": { "target": "com.amazonaws.cognitoidentityprovider#AnalyticsConfigurationType", "traits": { - "smithy.api#documentation": "

The user pool analytics configuration for collecting metrics and sending them to your\n Amazon Pinpoint campaign.

\n \n

In Amazon Web Services Regions where Amazon Pinpoint isn't available, user pools only support sending\n events to Amazon Pinpoint projects in Amazon Web Services Region us-east-1. In Regions where Amazon Pinpoint is\n available, user pools support sending events to Amazon Pinpoint projects within that same\n Region.

\n
" + "smithy.api#documentation": "

The user pool analytics configuration for collecting metrics and sending them to your\n Amazon Pinpoint campaign.

\n

In Amazon Web Services Regions where Amazon Pinpoint isn't available, user pools might not have access to\n analytics or might be configurable with campaigns in the US East (N. Virginia) Region.\n For more information, see Using Amazon Pinpoint analytics.

" } }, "PreventUserExistenceErrors": { @@ -6929,7 +6929,7 @@ "UserPoolClient": { "target": "com.amazonaws.cognitoidentityprovider#UserPoolClientType", "traits": { - "smithy.api#documentation": "

The user pool client that was just created.

" + "smithy.api#documentation": "

The details of the new app client.

" } } }, @@ -6967,7 +6967,7 @@ } ], "traits": { - "smithy.api#documentation": "

Creates a new domain for a user pool. The domain hosts user pool domain services like\n managed login, the hosted UI (classic), and the user pool authorization server.

\n \n

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For\n this operation, you must use IAM credentials to authorize requests, and you must\n grant yourself the corresponding IAM permission in a policy.

\n

\n Learn more\n

\n \n
" + "smithy.api#documentation": "

A user pool domain hosts managed login, an authorization server and web server for\n authentication in your application. This operation creates a new user pool prefix or\n custom domain and sets the managed login branding version. Set the branding version to\n 1 for hosted UI (classic) or 2 for managed login. When you\n choose a custom domain, you must provide an SSL certificate in the US East (N. Virginia)\n Amazon Web Services Region in your request.

\n

Your prefix domain might take up to one minute to take effect. Your custom domain is\n online within five minutes, but it can take up to one hour to distribute your SSL\n certificate.

\n

For more information about adding a custom domain to your user pool, see Configuring a user pool domain.

\n \n

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For\n this operation, you must use IAM credentials to authorize requests, and you must\n grant yourself the corresponding IAM permission in a policy.

\n

\n Learn more\n

\n \n
" } }, "com.amazonaws.cognitoidentityprovider#CreateUserPoolDomainRequest": { @@ -6976,7 +6976,7 @@ "Domain": { "target": "com.amazonaws.cognitoidentityprovider#DomainType", "traits": { - "smithy.api#documentation": "

The domain string. For custom domains, this is the fully-qualified domain name, such\n as auth.example.com. For Amazon Cognito prefix domains, this is the prefix alone,\n such as auth.

", + "smithy.api#documentation": "

The domain string. For custom domains, this is the fully-qualified domain name, such\n as auth.example.com. For prefix domains, this is the prefix alone, such as\n myprefix. A prefix value of myprefix for a user pool in\n the us-east-1 Region results in a domain of\n myprefix.auth.us-east-1.amazoncognito.com.

", "smithy.api#required": {} } }, @@ -6990,13 +6990,13 @@ "ManagedLoginVersion": { "target": "com.amazonaws.cognitoidentityprovider#WrappedIntegerType", "traits": { - "smithy.api#documentation": "

The version of managed login branding that you want to apply to your domain. A value\n of 1 indicates hosted UI (classic) branding and a version of 2\n indicates managed login branding.

\n

Managed login requires that your user pool be configured for any feature plan other than Lite.

" + "smithy.api#documentation": "

The version of managed login branding that you want to apply to your domain. A value\n of 1 indicates hosted UI (classic) and a version of 2\n indicates managed login.

\n

Managed login requires that your user pool be configured for any feature plan other than Lite.

" } }, "CustomDomainConfig": { "target": "com.amazonaws.cognitoidentityprovider#CustomDomainConfigType", "traits": { - "smithy.api#documentation": "

The configuration for a custom domain that hosts the sign-up and sign-in webpages for\n your application.

\n

Provide this parameter only if you want to use a custom domain for your user pool.\n Otherwise, you can exclude this parameter and use the Amazon Cognito hosted domain\n instead.

\n

For more information about the hosted domain and custom domains, see Configuring a User Pool Domain.

" + "smithy.api#documentation": "

The configuration for a custom domain. Configures your domain with a Certificate Manager\n certificate in the us-east-1 Region.

\n

Provide this parameter only if you want to use a custom domain for your user pool.\n Otherwise, you can exclude this parameter and use a prefix domain instead.

\n

For more information about the hosted domain and custom domains, see Configuring a User Pool Domain.

" } } }, @@ -7010,7 +7010,7 @@ "ManagedLoginVersion": { "target": "com.amazonaws.cognitoidentityprovider#WrappedIntegerType", "traits": { - "smithy.api#documentation": "

The version of managed login branding applied your domain. A value of 1\n indicates hosted UI (classic) branding and a version of 2 indicates managed\n login branding.

" + "smithy.api#documentation": "

The version of managed login branding applied to your domain. A value of 1\n indicates hosted UI (classic) and a version of 2 indicates managed\n login.

" } }, "CloudFrontDomain": { @@ -7030,14 +7030,14 @@ "PoolName": { "target": "com.amazonaws.cognitoidentityprovider#UserPoolNameType", "traits": { - "smithy.api#documentation": "

A string used to name the user pool.

", + "smithy.api#documentation": "

A friendly name for your user pool.

", "smithy.api#required": {} } }, "Policies": { "target": "com.amazonaws.cognitoidentityprovider#UserPoolPolicyType", "traits": { - "smithy.api#documentation": "

The policies associated with the new user pool.

" + "smithy.api#documentation": "

The password policy and sign-in policy in the user pool. The password policy sets\n options like password complexity requirements and password history. The sign-in policy\n sets the options available to applications in choice-based authentication.

" } }, "DeletionProtection": { @@ -7055,19 +7055,19 @@ "AutoVerifiedAttributes": { "target": "com.amazonaws.cognitoidentityprovider#VerifiedAttributesListType", "traits": { - "smithy.api#documentation": "

The attributes to be auto-verified. Possible values: email, phone_number.

" + "smithy.api#documentation": "

The attributes that you want your user pool to automatically verify. Possible values:\n email, phone_number. For more information see Verifying contact information at sign-up.

" } }, "AliasAttributes": { "target": "com.amazonaws.cognitoidentityprovider#AliasAttributesListType", "traits": { - "smithy.api#documentation": "

Attributes supported as an alias for this user pool. Possible values: phone_number, email, or\n preferred_username.

" + "smithy.api#documentation": "

Attributes supported as an alias for this user pool. Possible values: phone_number, email, or\n preferred_username. For more information about\n alias attributes, see Customizing sign-in attributes.

" } }, "UsernameAttributes": { "target": "com.amazonaws.cognitoidentityprovider#UsernameAttributesListType", "traits": { - "smithy.api#documentation": "

Specifies whether a user can use an email address or phone number as a username when\n they sign up.

" + "smithy.api#documentation": "

Specifies whether a user can use an email address or phone number as a username when\n they sign up. For more information, see Customizing sign-in attributes.

" } }, "SmsVerificationMessage": { @@ -7103,7 +7103,7 @@ "MfaConfiguration": { "target": "com.amazonaws.cognitoidentityprovider#UserPoolMfaType", "traits": { - "smithy.api#documentation": "

Specifies MFA configuration details.

" + "smithy.api#documentation": "

Sets multi-factor authentication (MFA) to be on, off, or optional. When\n ON, all users must set up MFA before they can sign in. When\n OPTIONAL, your application must make a client-side determination of\n whether a user wants to register an MFA device. For user pools with adaptive\n authentication with threat protection, choose OPTIONAL.

" } }, "UserAttributeUpdateSettings": { @@ -7115,7 +7115,7 @@ "DeviceConfiguration": { "target": "com.amazonaws.cognitoidentityprovider#DeviceConfigurationType", "traits": { - "smithy.api#documentation": "

The device-remembering configuration for a user pool. A null value indicates that you\n have deactivated device remembering in your user pool.

\n \n

When you provide a value for any DeviceConfiguration field, you\n activate the Amazon Cognito device-remembering feature.

\n
" + "smithy.api#documentation": "

The device-remembering configuration for a user pool. Device remembering or device\n tracking is a \"Remember me on this device\" option for user pools that perform\n authentication with the device key of a trusted device in the back end, instead of a\n user-provided MFA code. For more information about device authentication, see Working with user devices in your user pool. A null value indicates that\n you have deactivated device remembering in your user pool.

\n \n

When you provide a value for any DeviceConfiguration field, you\n activate the Amazon Cognito device-remembering feature. For more information,\n see Working with user devices in your user pool.

\n
" } }, "EmailConfiguration": { @@ -7127,7 +7127,7 @@ "SmsConfiguration": { "target": "com.amazonaws.cognitoidentityprovider#SmsConfigurationType", "traits": { - "smithy.api#documentation": "

The SMS configuration with the settings that your Amazon Cognito user pool must use to send an\n SMS message from your Amazon Web Services account through Amazon Simple Notification Service. To send SMS messages\n with Amazon SNS in the Amazon Web Services Region that you want, the Amazon Cognito user pool uses an Identity and Access Management\n (IAM) role in your Amazon Web Services account.

" + "smithy.api#documentation": "

The SMS configuration with the settings that your Amazon Cognito user pool must use to send an\n SMS message from your Amazon Web Services account through Amazon Simple Notification Service. To send SMS messages\n with Amazon SNS in the Amazon Web Services Region that you want, the Amazon Cognito user pool uses an Identity and Access Management\n (IAM) role in your Amazon Web Services account. For more information see SMS message settings.

" } }, "UserPoolTags": { @@ -7139,13 +7139,13 @@ "AdminCreateUserConfig": { "target": "com.amazonaws.cognitoidentityprovider#AdminCreateUserConfigType", "traits": { - "smithy.api#documentation": "

The configuration for AdminCreateUser requests.

" + "smithy.api#documentation": "

The configuration for AdminCreateUser requests. Includes the template for the\n invitation message for new users, the duration of temporary passwords, and permitting\n self-service sign-up.

" } }, "Schema": { "target": "com.amazonaws.cognitoidentityprovider#SchemaAttributesListType", "traits": { - "smithy.api#documentation": "

An array of schema attributes for the new user pool. These attributes can be standard\n or custom attributes.

" + "smithy.api#documentation": "

An array of attributes for the new user pool. You can add custom attributes and modify\n the properties of default attributes. The specifications in this parameter set the\n required attributes in your user pool. For more information, see Working with user attributes.

" } }, "UserPoolAddOns": { @@ -7157,7 +7157,7 @@ "UsernameConfiguration": { "target": "com.amazonaws.cognitoidentityprovider#UsernameConfigurationType", "traits": { - "smithy.api#documentation": "

Case sensitivity on the username input for the selected sign-in option. When case\n sensitivity is set to False (case insensitive), users can sign in with any\n combination of capital and lowercase letters. For example, username,\n USERNAME, or UserName, or for email,\n email@example.com or EMaiL@eXamplE.Com. For most use\n cases, set case sensitivity to False (case insensitive) as a best practice.\n When usernames and email addresses are case insensitive, Amazon Cognito treats any variation in\n case as the same user, and prevents a case variation from being assigned to the same\n attribute for a different user.

\n

This configuration is immutable after you set it. For more information, see UsernameConfigurationType.

" + "smithy.api#documentation": "

Sets the case sensitivity option for sign-in usernames. When\n CaseSensitive is false (case insensitive), users can sign\n in with any combination of capital and lowercase letters. For example,\n username, USERNAME, or UserName, or for\n email, email@example.com or EMaiL@eXamplE.Com. For most use\n cases, set case sensitivity to false as a best practice. When usernames and\n email addresses are case insensitive, Amazon Cognito treats any variation in case as the same\n user, and prevents a case variation from being assigned to the same attribute for a\n different user.

\n

When CaseSensitive is true (case sensitive), Amazon Cognito\n interprets USERNAME and UserName as distinct users.

\n

This configuration is immutable after you set it.

" } }, "AccountRecoverySetting": { @@ -7184,7 +7184,7 @@ "UserPool": { "target": "com.amazonaws.cognitoidentityprovider#UserPoolType", "traits": { - "smithy.api#documentation": "

A container for the user pool details.

" + "smithy.api#documentation": "

The details of the created user pool.

" } } }, @@ -7342,7 +7342,7 @@ } ], "traits": { - "smithy.api#documentation": "

Deletes a group.

\n

Calling this action requires developer credentials.

" + "smithy.api#documentation": "

Deletes a group from the specified user pool. When you delete a group, that group no\n longer contributes to users' cognito:preferred_group or\n cognito:groups claims, and no longer influences access-control decisions\n that are based on group membership. For more information about user pool groups, see\n Adding groups to a user pool.

\n \n

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For\n this operation, you must use IAM credentials to authorize requests, and you must\n grant yourself the corresponding IAM permission in a policy.

\n

\n Learn more\n

\n \n
" } }, "com.amazonaws.cognitoidentityprovider#DeleteGroupRequest": { @@ -7351,14 +7351,14 @@ "GroupName": { "target": "com.amazonaws.cognitoidentityprovider#GroupNameType", "traits": { - "smithy.api#documentation": "

The name of the group.

", + "smithy.api#documentation": "

The name of the group that you want to delete.

", "smithy.api#required": {} } }, "UserPoolId": { "target": "com.amazonaws.cognitoidentityprovider#UserPoolIdType", "traits": { - "smithy.api#documentation": "

The user pool ID for the user pool.

", + "smithy.api#documentation": "

The ID of the user pool where you want to delete the group.

", "smithy.api#required": {} } } @@ -7399,7 +7399,7 @@ } ], "traits": { - "smithy.api#documentation": "

Deletes an IdP for a user pool.

" + "smithy.api#documentation": "

Deletes a user pool identity provider (IdP). After you delete an IdP, users can no\n longer sign in to your user pool through that IdP. For more information about user pool\n IdPs, see Third-party IdP sign-in.

\n \n

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For\n this operation, you must use IAM credentials to authorize requests, and you must\n grant yourself the corresponding IAM permission in a policy.

\n

\n Learn more\n

\n \n
" } }, "com.amazonaws.cognitoidentityprovider#DeleteIdentityProviderRequest": { @@ -7408,14 +7408,14 @@ "UserPoolId": { "target": "com.amazonaws.cognitoidentityprovider#UserPoolIdType", "traits": { - "smithy.api#documentation": "

The user pool ID.

", + "smithy.api#documentation": "

The ID of the user pool where you want to delete the identity provider.

", "smithy.api#required": {} } }, "ProviderName": { "target": "com.amazonaws.cognitoidentityprovider#ProviderNameType", "traits": { - "smithy.api#documentation": "

The IdP name.

", + "smithy.api#documentation": "

The name of the IdP that you want to delete.

", "smithy.api#required": {} } } @@ -7453,7 +7453,7 @@ } ], "traits": { - "smithy.api#documentation": "

Deletes a managed login branding style. When you delete a style, you delete the\n branding association for an app client and restore it to default settings.

\n \n

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For\n this operation, you must use IAM credentials to authorize requests, and you must\n grant yourself the corresponding IAM permission in a policy.

\n

\n Learn more\n

\n \n
" + "smithy.api#documentation": "

Deletes a managed login branding style. When you delete a style, you delete the\n branding association for an app client. When an app client doesn't have a style\n assigned, your managed login pages for that app client are nonfunctional until you\n create a new style or switch the domain branding version.

\n \n

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For\n this operation, you must use IAM credentials to authorize requests, and you must\n grant yourself the corresponding IAM permission in a policy.

\n

\n Learn more\n

\n \n
" } }, "com.amazonaws.cognitoidentityprovider#DeleteManagedLoginBrandingRequest": { @@ -7504,7 +7504,7 @@ } ], "traits": { - "smithy.api#documentation": "

Deletes a resource server.

" + "smithy.api#documentation": "

Deletes a resource server. After you delete a resource server, users can no longer\n generate access tokens with scopes that are associated with that resource server.

\n

Resource servers are associated with custom scopes and machine-to-machine (M2M)\n authorization. For more information, see Access control with resource servers.

\n \n

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For\n this operation, you must use IAM credentials to authorize requests, and you must\n grant yourself the corresponding IAM permission in a policy.

\n

\n Learn more\n

\n \n
" } }, "com.amazonaws.cognitoidentityprovider#DeleteResourceServerRequest": { @@ -7513,14 +7513,14 @@ "UserPoolId": { "target": "com.amazonaws.cognitoidentityprovider#UserPoolIdType", "traits": { - "smithy.api#documentation": "

The user pool ID for the user pool that hosts the resource server.

", + "smithy.api#documentation": "

The ID of the user pool where you want to delete the resource server.

", "smithy.api#required": {} } }, "Identifier": { "target": "com.amazonaws.cognitoidentityprovider#ResourceServerIdentifierType", "traits": { - "smithy.api#documentation": "

The identifier for the resource server.

", + "smithy.api#documentation": "

The identifier of the resource server that you want to delete.

", "smithy.api#required": {} } } @@ -7568,7 +7568,7 @@ ], "traits": { "smithy.api#auth": [], - "smithy.api#documentation": "

Allows a user to delete their own user profile.

\n

Authorize this action with a signed-in user's access token. It must include the scope aws.cognito.signin.user.admin.

\n \n

Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For\n this operation, you can't use IAM credentials to authorize requests, and you can't\n grant IAM permissions in policies. For more information about authorization models in\n Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints.

\n
", + "smithy.api#documentation": "

Self-deletes a user profile. A deleted user profile can no longer be used to sign in\n and can't be restored.

\n

Authorize this action with a signed-in user's access token. It must include the scope aws.cognito.signin.user.admin.

\n \n

Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For\n this operation, you can't use IAM credentials to authorize requests, and you can't\n grant IAM permissions in policies. For more information about authorization models in\n Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints.

\n
", "smithy.api#optionalAuth": {} } }, @@ -7611,7 +7611,7 @@ ], "traits": { "smithy.api#auth": [], - "smithy.api#documentation": "

Deletes the attributes for a user.

\n

Authorize this action with a signed-in user's access token. It must include the scope aws.cognito.signin.user.admin.

\n \n

Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For\n this operation, you can't use IAM credentials to authorize requests, and you can't\n grant IAM permissions in policies. For more information about authorization models in\n Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints.

\n
", + "smithy.api#documentation": "

Self-deletes attributes for a user. For example, your application can submit a request\n to this operation when a user wants to remove their birthdate attribute\n value.

\n

Authorize this action with a signed-in user's access token. It must include the scope aws.cognito.signin.user.admin.

\n \n

Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For\n this operation, you can't use IAM credentials to authorize requests, and you can't\n grant IAM permissions in policies. For more information about authorization models in\n Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints.

\n
", "smithy.api#optionalAuth": {} } }, @@ -7621,7 +7621,7 @@ "UserAttributeNames": { "target": "com.amazonaws.cognitoidentityprovider#AttributeNameListType", "traits": { - "smithy.api#documentation": "

An array of strings representing the user attribute names you want to delete.

\n

For custom attributes, you must prependattach the custom: prefix to the\n front of the attribute name.

", + "smithy.api#documentation": "

An array of strings representing the user attribute names you want to delete.

\n

For custom attributes, you must prepend the custom: prefix to the\n attribute name, for example custom:department.

", "smithy.api#required": {} } }, @@ -7675,7 +7675,7 @@ } ], "traits": { - "smithy.api#documentation": "

Deletes the specified Amazon Cognito user pool.

" + "smithy.api#documentation": "

Deletes a user pool. After you delete a user pool, users can no longer sign in to any\n associated applications.

\n

" } }, "com.amazonaws.cognitoidentityprovider#DeleteUserPoolClient": { @@ -7707,7 +7707,7 @@ } ], "traits": { - "smithy.api#documentation": "

Allows the developer to delete the user pool client.

" + "smithy.api#documentation": "

Deletes a user pool app client. After you delete an app client, users can no longer\n sign in to the associated application.

" } }, "com.amazonaws.cognitoidentityprovider#DeleteUserPoolClientRequest": { @@ -7716,14 +7716,14 @@ "UserPoolId": { "target": "com.amazonaws.cognitoidentityprovider#UserPoolIdType", "traits": { - "smithy.api#documentation": "

The user pool ID for the user pool where you want to delete the client.

", + "smithy.api#documentation": "

The ID of the user pool where you want to delete the client.

", "smithy.api#required": {} } }, "ClientId": { "target": "com.amazonaws.cognitoidentityprovider#ClientIdType", "traits": { - "smithy.api#documentation": "

The app client ID of the app associated with the user pool.

", + "smithy.api#documentation": "

The ID of the user pool app client that you want to delete.

", "smithy.api#required": {} } } @@ -7756,7 +7756,7 @@ } ], "traits": { - "smithy.api#documentation": "

Deletes a domain for a user pool.

" + "smithy.api#documentation": "

Given a user pool ID and domain identifier, deletes a user pool domain. After you\n delete a user pool domain, your managed login pages and authorization server are no\n longer available.

" } }, "com.amazonaws.cognitoidentityprovider#DeleteUserPoolDomainRequest": { @@ -7765,14 +7765,14 @@ "Domain": { "target": "com.amazonaws.cognitoidentityprovider#DomainType", "traits": { - "smithy.api#documentation": "

The domain string. For custom domains, this is the fully-qualified domain name, such\n as auth.example.com. For Amazon Cognito prefix domains, this is the prefix alone,\n such as auth.

", + "smithy.api#documentation": "

The domain that you want to delete. For custom domains, this is the fully-qualified\n domain name, such as auth.example.com. For Amazon Cognito prefix domains, this is\n the prefix alone, such as auth.

", "smithy.api#required": {} } }, "UserPoolId": { "target": "com.amazonaws.cognitoidentityprovider#UserPoolIdType", "traits": { - "smithy.api#documentation": "

The user pool ID.

", + "smithy.api#documentation": "

The ID of the user pool where you want to delete the domain.

", "smithy.api#required": {} } } @@ -7794,7 +7794,7 @@ "UserPoolId": { "target": "com.amazonaws.cognitoidentityprovider#UserPoolIdType", "traits": { - "smithy.api#documentation": "

The user pool ID for the user pool you want to delete.

", + "smithy.api#documentation": "

The ID of the user pool that you want to delete.

", "smithy.api#required": {} } } @@ -7847,7 +7847,7 @@ ], "traits": { "smithy.api#auth": [], - "smithy.api#documentation": "

Deletes a registered passkey, or webauthN, device for the currently signed-in\n user.

\n

Authorize this action with a signed-in user's access token. It must include the scope aws.cognito.signin.user.admin.

", + "smithy.api#documentation": "

Deletes a registered passkey, or webauthN, authenticator for the currently signed-in\n user.

\n

Authorize this action with a signed-in user's access token. It must include the scope aws.cognito.signin.user.admin.

\n \n

Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For\n this operation, you can't use IAM credentials to authorize requests, and you can't\n grant IAM permissions in policies. For more information about authorization models in\n Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints.

\n
", "smithy.api#optionalAuth": {} } }, @@ -7857,14 +7857,14 @@ "AccessToken": { "target": "com.amazonaws.cognitoidentityprovider#TokenModelType", "traits": { - "smithy.api#documentation": "

A valid access token that Amazon Cognito issued to the user whose passkey you want to\n delete.

", + "smithy.api#documentation": "

A valid access token that Amazon Cognito issued to the user whose passkey credential you want\n to delete.

", "smithy.api#required": {} } }, "CredentialId": { "target": "com.amazonaws.cognitoidentityprovider#StringType", "traits": { - "smithy.api#documentation": "

The unique identifier of the passkey that you want to delete. Look up registered\n devices with ListWebAuthnCredentials.

", + "smithy.api#documentation": "

The unique identifier of the passkey that you want to delete. Look up registered\n devices with ListWebAuthnCredentials.

", "smithy.api#required": {} } } @@ -7946,7 +7946,7 @@ } ], "traits": { - "smithy.api#documentation": "

Gets information about a specific IdP.

" + "smithy.api#documentation": "

Given a user pool ID and identity provider (IdP) name, returns details about the\n IdP.

" } }, "com.amazonaws.cognitoidentityprovider#DescribeIdentityProviderRequest": { @@ -7955,14 +7955,14 @@ "UserPoolId": { "target": "com.amazonaws.cognitoidentityprovider#UserPoolIdType", "traits": { - "smithy.api#documentation": "

The user pool ID.

", + "smithy.api#documentation": "

The ID of the user pool that has the IdP that you want to describe.

", "smithy.api#required": {} } }, "ProviderName": { "target": "com.amazonaws.cognitoidentityprovider#ProviderNameType", "traits": { - "smithy.api#documentation": "

The IdP name.

", + "smithy.api#documentation": "

The name of the IdP that you want to describe.

", "smithy.api#required": {} } } @@ -7977,7 +7977,7 @@ "IdentityProvider": { "target": "com.amazonaws.cognitoidentityprovider#IdentityProviderType", "traits": { - "smithy.api#documentation": "

The identity provider details.

", + "smithy.api#documentation": "

The details of the requested IdP.

", "smithy.api#required": {} } } @@ -8012,7 +8012,7 @@ } ], "traits": { - "smithy.api#documentation": "

When given the ID of a managed login branding style, returns detailed information\n about the style.

" + "smithy.api#documentation": "

Given the ID of a managed login branding style, returns detailed information about the\n style.

" } }, "com.amazonaws.cognitoidentityprovider#DescribeManagedLoginBrandingByClient": { @@ -8041,7 +8041,7 @@ } ], "traits": { - "smithy.api#documentation": "

When given the ID of a user pool app client, returns detailed information about the\n style assigned to the app client.

" + "smithy.api#documentation": "

Given the ID of a user pool app client, returns detailed information about the style\n assigned to the app client.

" } }, "com.amazonaws.cognitoidentityprovider#DescribeManagedLoginBrandingByClientRequest": { @@ -8156,7 +8156,7 @@ } ], "traits": { - "smithy.api#documentation": "

Describes a resource server.

" + "smithy.api#documentation": "

Describes a resource server. For more information about resource servers, see Access control with resource servers.

" } }, "com.amazonaws.cognitoidentityprovider#DescribeResourceServerRequest": { @@ -8165,7 +8165,7 @@ "UserPoolId": { "target": "com.amazonaws.cognitoidentityprovider#UserPoolIdType", "traits": { - "smithy.api#documentation": "

The user pool ID for the user pool that hosts the resource server.

", + "smithy.api#documentation": "

The ID of the user pool that hosts the resource server.

", "smithy.api#required": {} } }, @@ -8187,7 +8187,7 @@ "ResourceServer": { "target": "com.amazonaws.cognitoidentityprovider#ResourceServerType", "traits": { - "smithy.api#documentation": "

The resource server.

", + "smithy.api#documentation": "

The details of the requested resource server.

", "smithy.api#required": {} } } @@ -8225,7 +8225,7 @@ } ], "traits": { - "smithy.api#documentation": "

Describes the risk configuration.

" + "smithy.api#documentation": "

Given an app client or user pool ID where threat protection is configured, describes\n the risk configuration. This operation returns details about adaptive authentication,\n compromised credentials, and IP-address allow- and denylists. For more information about\n threat protection, see Threat protection.

" } }, "com.amazonaws.cognitoidentityprovider#DescribeRiskConfigurationRequest": { @@ -8234,14 +8234,14 @@ "UserPoolId": { "target": "com.amazonaws.cognitoidentityprovider#UserPoolIdType", "traits": { - "smithy.api#documentation": "

The user pool ID.

", + "smithy.api#documentation": "

The ID of the user pool with the risk configuration that you want to inspect. You can\n apply default risk configuration at the user pool level and further customize it from\n user pool defaults at the app-client level. Specify ClientId to inspect\n client-level configuration, or UserPoolId to inspect pool-level\n configuration.

", "smithy.api#required": {} } }, "ClientId": { "target": "com.amazonaws.cognitoidentityprovider#ClientIdType", "traits": { - "smithy.api#documentation": "

The app client ID.

" + "smithy.api#documentation": "

The ID of the app client with the risk configuration that you want to inspect. You can\n apply default risk configuration at the user pool level and further customize it from\n user pool defaults at the app-client level. Specify ClientId to inspect\n client-level configuration, or UserPoolId to inspect pool-level\n configuration.

" } } }, @@ -8255,7 +8255,7 @@ "RiskConfiguration": { "target": "com.amazonaws.cognitoidentityprovider#RiskConfigurationType", "traits": { - "smithy.api#documentation": "

The risk configuration.

", + "smithy.api#documentation": "

The details of the requested risk configuration.

", "smithy.api#required": {} } } @@ -8290,7 +8290,7 @@ } ], "traits": { - "smithy.api#documentation": "

Describes the user import job.

" + "smithy.api#documentation": "

Describes a user import job. For more information about user CSV import, see Importing users from a CSV file.

" } }, "com.amazonaws.cognitoidentityprovider#DescribeUserImportJobRequest": { @@ -8299,14 +8299,14 @@ "UserPoolId": { "target": "com.amazonaws.cognitoidentityprovider#UserPoolIdType", "traits": { - "smithy.api#documentation": "

The user pool ID for the user pool that the users are being imported into.

", + "smithy.api#documentation": "

The ID of the user pool that's associated with the import job.

", "smithy.api#required": {} } }, "JobId": { "target": "com.amazonaws.cognitoidentityprovider#UserImportJobIdType", "traits": { - "smithy.api#documentation": "

The job ID for the user import job.

", + "smithy.api#documentation": "

The ID of the user import job that you want to describe.

", "smithy.api#required": {} } } @@ -8322,7 +8322,7 @@ "UserImportJob": { "target": "com.amazonaws.cognitoidentityprovider#UserImportJobType", "traits": { - "smithy.api#documentation": "

The job object that represents the user import job.

" + "smithy.api#documentation": "

The details of the user import job.

" } } }, @@ -8360,7 +8360,7 @@ } ], "traits": { - "smithy.api#documentation": "

Returns the configuration information and metadata of the specified user pool.

\n \n

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For\n this operation, you must use IAM credentials to authorize requests, and you must\n grant yourself the corresponding IAM permission in a policy.

\n

\n Learn more\n

\n \n
" + "smithy.api#documentation": "

Given a user pool ID, returns configuration information. This operation is useful when\n you want to inspect an existing user pool and programmatically replicate the\n configuration to another user pool.

\n \n

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For\n this operation, you must use IAM credentials to authorize requests, and you must\n grant yourself the corresponding IAM permission in a policy.

\n

\n Learn more\n

\n \n
" } }, "com.amazonaws.cognitoidentityprovider#DescribeUserPoolClient": { @@ -8389,7 +8389,7 @@ } ], "traits": { - "smithy.api#documentation": "

Client method for returning the configuration information and metadata of the\n specified user pool app client.

\n \n

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For\n this operation, you must use IAM credentials to authorize requests, and you must\n grant yourself the corresponding IAM permission in a policy.

\n

\n Learn more\n

\n \n
" + "smithy.api#documentation": "

Given an app client ID, returns configuration information. This operation is useful\n when you want to inspect an existing app client and programmatically replicate the\n configuration to another app client. For more information about app clients, see App clients.

\n \n

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For\n this operation, you must use IAM credentials to authorize requests, and you must\n grant yourself the corresponding IAM permission in a policy.

\n

\n Learn more\n

\n \n
" } }, "com.amazonaws.cognitoidentityprovider#DescribeUserPoolClientRequest": { @@ -8398,14 +8398,14 @@ "UserPoolId": { "target": "com.amazonaws.cognitoidentityprovider#UserPoolIdType", "traits": { - "smithy.api#documentation": "

The user pool ID for the user pool you want to describe.

", + "smithy.api#documentation": "

The ID of the user pool that contains the app client you want to describe.

", "smithy.api#required": {} } }, "ClientId": { "target": "com.amazonaws.cognitoidentityprovider#ClientIdType", "traits": { - "smithy.api#documentation": "

The app client ID of the app associated with the user pool.

", + "smithy.api#documentation": "

The ID of the app client that you want to describe.

", "smithy.api#required": {} } } @@ -8421,7 +8421,7 @@ "UserPoolClient": { "target": "com.amazonaws.cognitoidentityprovider#UserPoolClientType", "traits": { - "smithy.api#documentation": "

The user pool client from a server response to describe the user pool client.

" + "smithy.api#documentation": "

The details of the requested app client.

" } } }, @@ -8453,7 +8453,7 @@ } ], "traits": { - "smithy.api#documentation": "

Gets information about a domain.

" + "smithy.api#documentation": "

Given a user pool domain name, returns information about the domain\n configuration.

\n \n

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For\n this operation, you must use IAM credentials to authorize requests, and you must\n grant yourself the corresponding IAM permission in a policy.

\n

\n Learn more\n

\n \n
" } }, "com.amazonaws.cognitoidentityprovider#DescribeUserPoolDomainRequest": { @@ -8462,7 +8462,7 @@ "Domain": { "target": "com.amazonaws.cognitoidentityprovider#DomainType", "traits": { - "smithy.api#documentation": "

The domain string. For custom domains, this is the fully-qualified domain name, such\n as auth.example.com. For Amazon Cognito prefix domains, this is the prefix alone,\n such as auth.

", + "smithy.api#documentation": "

The domain that you want to describe. For custom domains, this is the fully-qualified\n domain name, such as auth.example.com. For Amazon Cognito prefix domains, this is\n the prefix alone, such as auth.

", "smithy.api#required": {} } } @@ -8477,7 +8477,7 @@ "DomainDescription": { "target": "com.amazonaws.cognitoidentityprovider#DomainDescriptionType", "traits": { - "smithy.api#documentation": "

A domain description object containing information about the domain.

" + "smithy.api#documentation": "

The details of the requested user pool domain.

" } } }, @@ -8491,7 +8491,7 @@ "UserPoolId": { "target": "com.amazonaws.cognitoidentityprovider#UserPoolIdType", "traits": { - "smithy.api#documentation": "

The user pool ID for the user pool you want to describe.

", + "smithy.api#documentation": "

The ID of the user pool you want to describe.

", "smithy.api#required": {} } } @@ -8507,7 +8507,7 @@ "UserPool": { "target": "com.amazonaws.cognitoidentityprovider#UserPoolType", "traits": { - "smithy.api#documentation": "

The container of metadata returned by the server to describe the pool.

" + "smithy.api#documentation": "

The details of the requested user pool.

" } } }, @@ -8839,13 +8839,13 @@ "Message": { "target": "com.amazonaws.cognitoidentityprovider#EmailMfaMessageType", "traits": { - "smithy.api#documentation": "

The template for the email message that your user pool sends to users with an MFA\n code. The message must contain the {####} placeholder. In the message,\n Amazon Cognito replaces this placeholder with the code. If you don't provide this parameter,\n Amazon Cognito sends messages in the default format.

" + "smithy.api#documentation": "

The template for the email message that your user pool sends to users with a code for\n MFA and sign-in with an email OTP. The message must contain the {####}\n placeholder. In the message, Amazon Cognito replaces this placeholder with the code. If you\n don't provide this parameter, Amazon Cognito sends messages in the default format.

" } }, "Subject": { "target": "com.amazonaws.cognitoidentityprovider#EmailMfaSubjectType", "traits": { - "smithy.api#documentation": "

The subject of the email message that your user pool sends to users with an MFA\n code.

" + "smithy.api#documentation": "

The subject of the email message that your user pool sends to users with a code for\n MFA and email OTP sign-in.

" } } }, @@ -9476,13 +9476,13 @@ "SecretHash": { "target": "com.amazonaws.cognitoidentityprovider#SecretHashType", "traits": { - "smithy.api#documentation": "

A keyed-hash message authentication code (HMAC) calculated using the secret key of a\n user pool client and username plus the client ID in the message.

" + "smithy.api#documentation": "

A keyed-hash message authentication code (HMAC) calculated using the secret key of a\n user pool client and username plus the client ID in the message. For more information\n about SecretHash, see Computing secret hash values.

" } }, "UserContextData": { "target": "com.amazonaws.cognitoidentityprovider#UserContextDataType", "traits": { - "smithy.api#documentation": "

Contextual data about your user session, such as the device fingerprint, IP address, or location. Amazon Cognito advanced \nsecurity evaluates the risk of an authentication event based on the context that your app generates and passes to Amazon Cognito\nwhen it makes API requests.

" + "smithy.api#documentation": "

Contextual data about your user session, such as the device fingerprint, IP address, or location. Amazon Cognito advanced \nsecurity evaluates the risk of an authentication event based on the context that your app generates and passes to Amazon Cognito\nwhen it makes API requests.

\n

For more information, see Collecting data for threat protection in\napplications.

" } }, "Username": { @@ -9501,7 +9501,7 @@ "ClientMetadata": { "target": "com.amazonaws.cognitoidentityprovider#ClientMetadataType", "traits": { - "smithy.api#documentation": "

A map of custom key-value pairs that you can provide as input for any custom workflows\n that this action triggers.

\n

You create custom workflows by assigning Lambda functions to user pool\n triggers. When you use the ForgotPassword API action, Amazon Cognito invokes any\n functions that are assigned to the following triggers: pre sign-up,\n custom message, and user migration. When\n Amazon Cognito invokes any of these functions, it passes a JSON payload, which the\n function receives as input. This payload contains a clientMetadata\n attribute, which provides the data that you assigned to the ClientMetadata parameter in\n your ForgotPassword request. In your function code in Lambda, you can\n process the clientMetadata value to enhance your workflow for your specific\n needs.

\n

For more information, see \nCustomizing user pool Workflows with Lambda Triggers in the Amazon Cognito Developer Guide.

\n \n

When you use the ClientMetadata parameter, remember that Amazon Cognito won't do the\n following:

\n
    \n
  • \n

    Store the ClientMetadata value. This data is available only to Lambda\n triggers that are assigned to a user pool to support custom workflows. If\n your user pool configuration doesn't include triggers, the ClientMetadata\n parameter serves no purpose.

    \n
  • \n
  • \n

    Validate the ClientMetadata value.

    \n
  • \n
  • \n

    Encrypt the ClientMetadata value. Don't use Amazon Cognito to provide sensitive\n information.

    \n
  • \n
\n
" + "smithy.api#documentation": "

A map of custom key-value pairs that you can provide as input for any custom workflows\n that this action triggers.

\n

You create custom workflows by assigning Lambda functions to user pool\n triggers. When you use the ForgotPassword API action, Amazon Cognito invokes any\n functions that are assigned to the following triggers: pre sign-up,\n custom message, and user migration. When\n Amazon Cognito invokes any of these functions, it passes a JSON payload, which the\n function receives as input. This payload contains a clientMetadata\n attribute, which provides the data that you assigned to the ClientMetadata parameter in\n your ForgotPassword request. In your function code in Lambda, you can\n process the clientMetadata value to enhance your workflow for your specific\n needs.

\n

For more information, see \nCustomizing user pool Workflows with Lambda Triggers in the Amazon Cognito Developer Guide.

\n \n

When you use the ClientMetadata parameter, note that Amazon Cognito won't do the\n following:

\n
    \n
  • \n

    Store the ClientMetadata value. This data is available only\n to Lambda triggers that are assigned to a user pool to support custom\n workflows. If your user pool configuration doesn't include triggers, the\n ClientMetadata parameter serves no purpose.

    \n
  • \n
  • \n

    Validate the ClientMetadata value.

    \n
  • \n
  • \n

    Encrypt the ClientMetadata value. Don't send sensitive\n information in this parameter.

    \n
  • \n
\n
" } } }, @@ -9566,7 +9566,7 @@ "UserPoolId": { "target": "com.amazonaws.cognitoidentityprovider#UserPoolIdType", "traits": { - "smithy.api#documentation": "

The user pool ID for the user pool that the users are to be imported into.

", + "smithy.api#documentation": "

The ID of the user pool that the users are to be imported into.

", "smithy.api#required": {} } } @@ -9582,7 +9582,7 @@ "UserPoolId": { "target": "com.amazonaws.cognitoidentityprovider#UserPoolIdType", "traits": { - "smithy.api#documentation": "

The user pool ID for the user pool that the users are to be imported into.

" + "smithy.api#documentation": "

The ID of the user pool that the users are to be imported into.

" } }, "CSVHeader": { @@ -9723,7 +9723,7 @@ "UserPoolId": { "target": "com.amazonaws.cognitoidentityprovider#UserPoolIdType", "traits": { - "smithy.api#documentation": "

The user pool ID for the user pool.

", + "smithy.api#documentation": "

The ID of the user pool.

", "smithy.api#required": {} } } @@ -9959,7 +9959,7 @@ "UserPoolId": { "target": "com.amazonaws.cognitoidentityprovider#UserPoolIdType", "traits": { - "smithy.api#documentation": "

The user pool ID for the user pool.

", + "smithy.api#documentation": "

The ID of the user pool.

", "smithy.api#required": {} } }, @@ -10119,7 +10119,7 @@ "ClientMetadata": { "target": "com.amazonaws.cognitoidentityprovider#ClientMetadataType", "traits": { - "smithy.api#documentation": "

A map of custom key-value pairs that you can provide as input for any custom workflows\n that this action triggers.

\n

You create custom workflows by assigning Lambda functions to user pool\n triggers. When you use the GetUserAttributeVerificationCode API action, Amazon Cognito invokes\n the function that is assigned to the custom message trigger. When\n Amazon Cognito invokes this function, it passes a JSON payload, which the function receives as\n input. This payload contains a clientMetadata attribute, which provides the\n data that you assigned to the ClientMetadata parameter in your\n GetUserAttributeVerificationCode request. In your function code in Lambda, you can process the clientMetadata value to enhance your workflow for\n your specific needs.

\n

For more information, see \nCustomizing user pool Workflows with Lambda Triggers in the Amazon Cognito Developer Guide.

\n \n

When you use the ClientMetadata parameter, remember that Amazon Cognito won't do the\n following:

\n
    \n
  • \n

    Store the ClientMetadata value. This data is available only to Lambda\n triggers that are assigned to a user pool to support custom workflows. If\n your user pool configuration doesn't include triggers, the ClientMetadata\n parameter serves no purpose.

    \n
  • \n
  • \n

    Validate the ClientMetadata value.

    \n
  • \n
  • \n

    Encrypt the ClientMetadata value. Don't use Amazon Cognito to provide sensitive\n information.

    \n
  • \n
\n
" + "smithy.api#documentation": "

A map of custom key-value pairs that you can provide as input for any custom workflows\n that this action triggers.

\n

You create custom workflows by assigning Lambda functions to user pool\n triggers. When you use the GetUserAttributeVerificationCode API action, Amazon Cognito invokes\n the function that is assigned to the custom message trigger. When\n Amazon Cognito invokes this function, it passes a JSON payload, which the function receives as\n input. This payload contains a clientMetadata attribute, which provides the\n data that you assigned to the ClientMetadata parameter in your\n GetUserAttributeVerificationCode request. In your function code in Lambda, you can process the clientMetadata value to enhance your workflow for\n your specific needs.

\n

For more information, see \nCustomizing user pool Workflows with Lambda Triggers in the Amazon Cognito Developer Guide.

\n \n

When you use the ClientMetadata parameter, note that Amazon Cognito won't do the\n following:

\n
    \n
  • \n

    Store the ClientMetadata value. This data is available only\n to Lambda triggers that are assigned to a user pool to support custom\n workflows. If your user pool configuration doesn't include triggers, the\n ClientMetadata parameter serves no purpose.

    \n
  • \n
  • \n

    Validate the ClientMetadata value.

    \n
  • \n
  • \n

    Encrypt the ClientMetadata value. Don't send sensitive\n information in this parameter.

    \n
  • \n
\n
" } } }, @@ -10409,7 +10409,7 @@ ], "traits": { "smithy.api#auth": [], - "smithy.api#documentation": "

Invalidates the identity, access, and refresh tokens that Amazon Cognito issued to a user. Call\n this operation when your user signs out of your app. This results in the following\n behavior.

\n
    \n
  • \n

    Amazon Cognito no longer accepts token-authorized user operations\n that you authorize with a signed-out user's access tokens. For more information,\n see Using the Amazon Cognito user pools API and user pool\n endpoints.

    \n

    Amazon Cognito returns an Access Token has been revoked error when your\n app attempts to authorize a user pools API request with a revoked access token\n that contains the scope aws.cognito.signin.user.admin.

    \n
  • \n
  • \n

    Amazon Cognito no longer accepts a signed-out user's ID token in a GetId request to an identity pool with\n ServerSideTokenCheck enabled for its user pool IdP\n configuration in CognitoIdentityProvider.

    \n
  • \n
  • \n

    Amazon Cognito no longer accepts a signed-out user's refresh tokens in refresh\n requests.

    \n
  • \n
\n

Other requests might be valid until your user's token expires.

\n

Authorize this action with a signed-in user's access token. It must include the scope aws.cognito.signin.user.admin.

\n \n

Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For\n this operation, you can't use IAM credentials to authorize requests, and you can't\n grant IAM permissions in policies. For more information about authorization models in\n Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints.

\n
", + "smithy.api#documentation": "

Invalidates the identity, access, and refresh tokens that Amazon Cognito issued to a user. Call\n this operation when your user signs out of your app. This results in the following\n behavior.

\n
    \n
  • \n

    Amazon Cognito no longer accepts token-authorized user operations\n that you authorize with a signed-out user's access tokens. For more information,\n see Using the Amazon Cognito user pools API and user pool\n endpoints.

    \n

    Amazon Cognito returns an Access Token has been revoked error when your\n app attempts to authorize a user pools API request with a revoked access token\n that contains the scope aws.cognito.signin.user.admin.

    \n
  • \n
  • \n

    Amazon Cognito no longer accepts a signed-out user's ID token in a GetId request to an identity pool with\n ServerSideTokenCheck enabled for its user pool IdP\n configuration in CognitoIdentityProvider.

    \n
  • \n
  • \n

    Amazon Cognito no longer accepts a signed-out user's refresh tokens in refresh\n requests.

    \n
  • \n
\n

Other requests might be valid until your user's token expires. This operation\n doesn't clear the managed login session cookie. To clear the session for\n a user who signed in with managed login or the classic hosted UI, direct their browser\n session to the logout endpoint.

\n

Authorize this action with a signed-in user's access token. It must include the scope aws.cognito.signin.user.admin.

\n \n

Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For\n this operation, you can't use IAM credentials to authorize requests, and you can't\n grant IAM permissions in policies. For more information about authorization models in\n Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints.

\n
", "smithy.api#optionalAuth": {} } }, @@ -10791,7 +10791,7 @@ "AuthFlow": { "target": "com.amazonaws.cognitoidentityprovider#AuthFlowType", "traits": { - "smithy.api#documentation": "

The authentication flow that you want to initiate. The AuthParameters\n that you must submit are linked to the flow that you submit. For example:

\n
    \n
  • \n

    \n USER_AUTH: Request a preferred authentication type or review\n available authentication types. From the offered authentication types, select\n one in a challenge response and then authenticate with that method in an\n additional challenge response.

    \n
  • \n
  • \n

    \n REFRESH_TOKEN_AUTH: Receive new ID and access tokens when you\n pass a REFRESH_TOKEN parameter with a valid refresh token as the\n value.

    \n
  • \n
  • \n

    \n USER_SRP_AUTH: Receive secure remote password (SRP) variables for\n the next challenge, PASSWORD_VERIFIER, when you pass\n USERNAME and SRP_A parameters.

    \n
  • \n
  • \n

    \n USER_PASSWORD_AUTH: Receive new tokens or the next challenge, for\n example SOFTWARE_TOKEN_MFA, when you pass USERNAME and\n PASSWORD parameters.

    \n
  • \n
\n

Valid values include the following:

\n
\n
USER_AUTH
\n
\n

The entry point for sign-in with passwords, one-time passwords, biometric\n devices, and security keys.

\n
\n
USER_SRP_AUTH
\n
\n

Username-password authentication with the Secure Remote Password (SRP)\n protocol. For more information, see Use SRP password verification in custom\n authentication flow.

\n
\n
REFRESH_TOKEN_AUTH and REFRESH_TOKEN
\n
\n

Provide a valid refresh token and receive new ID and access tokens. For\n more information, see Using the refresh token.

\n
\n
CUSTOM_AUTH
\n
\n

Custom authentication with Lambda triggers. For more information, see\n Custom authentication challenge Lambda\n triggers.

\n
\n
USER_PASSWORD_AUTH
\n
\n

Username-password authentication with the password sent directly in the\n request. For more information, see Admin authentication flow.

\n
\n
\n

\n ADMIN_USER_PASSWORD_AUTH is a flow type of AdminInitiateAuth and isn't valid for InitiateAuth.\n ADMIN_NO_SRP_AUTH is a legacy server-side username-password flow and\n isn't valid for InitiateAuth.

", + "smithy.api#documentation": "

The authentication flow that you want to initiate. Each AuthFlow has\n linked AuthParameters that you must submit. The following are some example\n flows and their parameters.

\n
    \n
  • \n

    \n USER_AUTH: Request a preferred authentication type or review\n available authentication types. From the offered authentication types, select\n one in a challenge response and then authenticate with that method in an\n additional challenge response.

    \n
  • \n
  • \n

    \n REFRESH_TOKEN_AUTH: Receive new ID and access tokens when you\n pass a REFRESH_TOKEN parameter with a valid refresh token as the\n value.

    \n
  • \n
  • \n

    \n USER_SRP_AUTH: Receive secure remote password (SRP) variables for\n the next challenge, PASSWORD_VERIFIER, when you pass\n USERNAME and SRP_A parameters.

    \n
  • \n
  • \n

    \n USER_PASSWORD_AUTH: Receive new tokens or the next challenge, for\n example SOFTWARE_TOKEN_MFA, when you pass USERNAME and\n PASSWORD parameters.

    \n
  • \n
\n

\n All flows\n

\n
\n
USER_AUTH
\n
\n

The entry point for sign-in with passwords, one-time passwords, and\n WebAuthN authenticators.

\n
\n
USER_SRP_AUTH
\n
\n

Username-password authentication with the Secure Remote Password (SRP)\n protocol. For more information, see Use SRP password verification in custom\n authentication flow.

\n
\n
REFRESH_TOKEN_AUTH and REFRESH_TOKEN
\n
\n

Provide a valid refresh token and receive new ID and access tokens. For\n more information, see Using the refresh token.

\n
\n
CUSTOM_AUTH
\n
\n

Custom authentication with Lambda triggers. For more information, see\n Custom authentication challenge Lambda\n triggers.

\n
\n
USER_PASSWORD_AUTH
\n
\n

Username-password authentication with the password sent directly in the\n request. For more information, see Admin authentication flow.

\n
\n
\n

\n ADMIN_USER_PASSWORD_AUTH is a flow type of AdminInitiateAuth and isn't valid for InitiateAuth.\n ADMIN_NO_SRP_AUTH is a legacy server-side username-password flow and\n isn't valid for InitiateAuth.

", "smithy.api#required": {} } }, @@ -10804,7 +10804,7 @@ "ClientMetadata": { "target": "com.amazonaws.cognitoidentityprovider#ClientMetadataType", "traits": { - "smithy.api#documentation": "

A map of custom key-value pairs that you can provide as input for certain custom\n workflows that this action triggers.

\n

You create custom workflows by assigning Lambda functions to user pool triggers.\n When you use the InitiateAuth API action, Amazon Cognito invokes the Lambda functions that are\n specified for various triggers. The ClientMetadata value is passed as input to the\n functions for only the following triggers:

\n
    \n
  • \n

    Pre signup

    \n
  • \n
  • \n

    Pre authentication

    \n
  • \n
  • \n

    User migration

    \n
  • \n
\n

When Amazon Cognito invokes the functions for these triggers, it passes a JSON payload, which\n the function receives as input. This payload contains a validationData\n attribute, which provides the data that you assigned to the ClientMetadata parameter in\n your InitiateAuth request. In your function code in Lambda, you can process the\n validationData value to enhance your workflow for your specific\n needs.

\n

When you use the InitiateAuth API action, Amazon Cognito also invokes the functions for the\n following triggers, but it doesn't provide the ClientMetadata value as input:

\n
    \n
  • \n

    Post authentication

    \n
  • \n
  • \n

    Custom message

    \n
  • \n
  • \n

    Pre token generation

    \n
  • \n
  • \n

    Create auth challenge

    \n
  • \n
  • \n

    Define auth challenge

    \n
  • \n
  • \n

    Custom email sender

    \n
  • \n
  • \n

    Custom SMS sender

    \n
  • \n
\n

For more information, see \nCustomizing user pool Workflows with Lambda Triggers in the Amazon Cognito Developer Guide.

\n \n

When you use the ClientMetadata parameter, remember that Amazon Cognito won't do the\n following:

\n
    \n
  • \n

    Store the ClientMetadata value. This data is available only to Lambda\n triggers that are assigned to a user pool to support custom workflows. If\n your user pool configuration doesn't include triggers, the ClientMetadata\n parameter serves no purpose.

    \n
  • \n
  • \n

    Validate the ClientMetadata value.

    \n
  • \n
  • \n

    Encrypt the ClientMetadata value. Don't use Amazon Cognito to provide sensitive\n information.

    \n
  • \n
\n
" + "smithy.api#documentation": "

A map of custom key-value pairs that you can provide as input for certain custom\n workflows that this action triggers.

\n

You create custom workflows by assigning Lambda functions to user pool triggers.\n When you use the InitiateAuth API action, Amazon Cognito invokes the Lambda functions that are\n specified for various triggers. The ClientMetadata value is passed as input to the\n functions for only the following triggers:

\n
    \n
  • \n

    Pre signup

    \n
  • \n
  • \n

    Pre authentication

    \n
  • \n
  • \n

    User migration

    \n
  • \n
\n

When Amazon Cognito invokes the functions for these triggers, it passes a JSON payload, which\n the function receives as input. This payload contains a validationData\n attribute, which provides the data that you assigned to the ClientMetadata parameter in\n your InitiateAuth request. In your function code in Lambda, you can process the\n validationData value to enhance your workflow for your specific\n needs.

\n

When you use the InitiateAuth API action, Amazon Cognito also invokes the functions for the\n following triggers, but it doesn't provide the ClientMetadata value as input:

\n
    \n
  • \n

    Post authentication

    \n
  • \n
  • \n

    Custom message

    \n
  • \n
  • \n

    Pre token generation

    \n
  • \n
  • \n

    Create auth challenge

    \n
  • \n
  • \n

    Define auth challenge

    \n
  • \n
  • \n

    Custom email sender

    \n
  • \n
  • \n

    Custom SMS sender

    \n
  • \n
\n

For more information, see \nCustomizing user pool Workflows with Lambda Triggers in the Amazon Cognito Developer Guide.

\n \n

When you use the ClientMetadata parameter, note that Amazon Cognito won't do the\n following:

\n
    \n
  • \n

    Store the ClientMetadata value. This data is available only\n to Lambda triggers that are assigned to a user pool to support custom\n workflows. If your user pool configuration doesn't include triggers, the\n ClientMetadata parameter serves no purpose.

    \n
  • \n
  • \n

    Validate the ClientMetadata value.

    \n
  • \n
  • \n

    Encrypt the ClientMetadata value. Don't send sensitive\n information in this parameter.

    \n
  • \n
\n
" } }, "ClientId": { @@ -10823,7 +10823,7 @@ "UserContextData": { "target": "com.amazonaws.cognitoidentityprovider#UserContextDataType", "traits": { - "smithy.api#documentation": "

Contextual data about your user session, such as the device fingerprint, IP address, or location. Amazon Cognito advanced \nsecurity evaluates the risk of an authentication event based on the context that your app generates and passes to Amazon Cognito\nwhen it makes API requests.

" + "smithy.api#documentation": "

Contextual data about your user session, such as the device fingerprint, IP address, or location. Amazon Cognito advanced \nsecurity evaluates the risk of an authentication event based on the context that your app generates and passes to Amazon Cognito\nwhen it makes API requests.

\n

For more information, see Collecting data for threat protection in\napplications.

" } }, "Session": { @@ -11276,7 +11276,7 @@ "UserPoolId": { "target": "com.amazonaws.cognitoidentityprovider#UserPoolIdType", "traits": { - "smithy.api#documentation": "

The user pool ID for the user pool.

", + "smithy.api#documentation": "

The ID of the user pool.

", "smithy.api#required": {} } }, @@ -11465,7 +11465,7 @@ "UserPoolId": { "target": "com.amazonaws.cognitoidentityprovider#UserPoolIdType", "traits": { - "smithy.api#documentation": "

The user pool ID for the user pool.

", + "smithy.api#documentation": "

The ID of the user pool.

", "smithy.api#required": {} } }, @@ -11600,7 +11600,7 @@ "UserPoolId": { "target": "com.amazonaws.cognitoidentityprovider#UserPoolIdType", "traits": { - "smithy.api#documentation": "

The user pool ID for the user pool that the users are being imported into.

", + "smithy.api#documentation": "

The ID of the user pool that the users are being imported into.

", "smithy.api#required": {} } }, @@ -11685,7 +11685,7 @@ "UserPoolId": { "target": "com.amazonaws.cognitoidentityprovider#UserPoolIdType", "traits": { - "smithy.api#documentation": "

The user pool ID for the user pool where you want to list user pool clients.

", + "smithy.api#documentation": "

The ID of the user pool where you want to list user pool clients.

", "smithy.api#required": {} } }, @@ -11966,7 +11966,7 @@ "UserPoolId": { "target": "com.amazonaws.cognitoidentityprovider#UserPoolIdType", "traits": { - "smithy.api#documentation": "

The user pool ID for the user pool.

", + "smithy.api#documentation": "

The ID of the user pool.

", "smithy.api#required": {} } }, @@ -12020,7 +12020,7 @@ "UserPoolId": { "target": "com.amazonaws.cognitoidentityprovider#UserPoolIdType", "traits": { - "smithy.api#documentation": "

The user pool ID for the user pool on which the search should be performed.

", + "smithy.api#documentation": "

The ID of the user pool on which the search should be performed.

", "smithy.api#required": {} } }, @@ -12340,7 +12340,7 @@ "target": "com.amazonaws.cognitoidentityprovider#BooleanType", "traits": { "smithy.api#default": false, - "smithy.api#documentation": "

When true, applies the default branding style options. This option reverts to a\n \"blank\" style that you can modify later in the branding designer.

" + "smithy.api#documentation": "

When true, applies the default branding style options. This option reverts to default\n style options that are managed by Amazon Cognito. You can modify them later in the branding\n designer.

\n

When you specify true for this option, you must also omit values for\n Settings and Assets in the request.

" } }, "Settings": { @@ -13106,13 +13106,13 @@ "SecretHash": { "target": "com.amazonaws.cognitoidentityprovider#SecretHashType", "traits": { - "smithy.api#documentation": "

A keyed-hash message authentication code (HMAC) calculated using the secret key of a\n user pool client and username plus the client ID in the message.

" + "smithy.api#documentation": "

A keyed-hash message authentication code (HMAC) calculated using the secret key of a\n user pool client and username plus the client ID in the message. For more information\n about SecretHash, see Computing secret hash values.

" } }, "UserContextData": { "target": "com.amazonaws.cognitoidentityprovider#UserContextDataType", "traits": { - "smithy.api#documentation": "

Contextual data about your user session, such as the device fingerprint, IP address, or location. Amazon Cognito advanced \nsecurity evaluates the risk of an authentication event based on the context that your app generates and passes to Amazon Cognito\nwhen it makes API requests.

" + "smithy.api#documentation": "

Contextual data about your user session, such as the device fingerprint, IP address, or location. Amazon Cognito advanced \nsecurity evaluates the risk of an authentication event based on the context that your app generates and passes to Amazon Cognito\nwhen it makes API requests.

\n

For more information, see Collecting data for threat protection in\napplications.

" } }, "Username": { @@ -13131,7 +13131,7 @@ "ClientMetadata": { "target": "com.amazonaws.cognitoidentityprovider#ClientMetadataType", "traits": { - "smithy.api#documentation": "

A map of custom key-value pairs that you can provide as input for any custom workflows\n that this action triggers.

\n

You create custom workflows by assigning Lambda functions to user pool triggers.\n When you use the ResendConfirmationCode API action, Amazon Cognito invokes the function that is\n assigned to the custom message trigger. When Amazon Cognito invokes this\n function, it passes a JSON payload, which the function receives as input. This payload\n contains a clientMetadata attribute, which provides the data that you\n assigned to the ClientMetadata parameter in your ResendConfirmationCode request. In your\n function code in Lambda, you can process the clientMetadata value to enhance\n your workflow for your specific needs.

\n

For more information, see \nCustomizing user pool Workflows with Lambda Triggers in the Amazon Cognito Developer Guide.

\n \n

When you use the ClientMetadata parameter, remember that Amazon Cognito won't do the\n following:

\n
    \n
  • \n

    Store the ClientMetadata value. This data is available only to Lambda\n triggers that are assigned to a user pool to support custom workflows. If\n your user pool configuration doesn't include triggers, the ClientMetadata\n parameter serves no purpose.

    \n
  • \n
  • \n

    Validate the ClientMetadata value.

    \n
  • \n
  • \n

    Encrypt the ClientMetadata value. Don't use Amazon Cognito to provide sensitive\n information.

    \n
  • \n
\n
" + "smithy.api#documentation": "

A map of custom key-value pairs that you can provide as input for any custom workflows\n that this action triggers.

\n

You create custom workflows by assigning Lambda functions to user pool triggers.\n When you use the ResendConfirmationCode API action, Amazon Cognito invokes the function that is\n assigned to the custom message trigger. When Amazon Cognito invokes this\n function, it passes a JSON payload, which the function receives as input. This payload\n contains a clientMetadata attribute, which provides the data that you\n assigned to the ClientMetadata parameter in your ResendConfirmationCode request. In your\n function code in Lambda, you can process the clientMetadata value to enhance\n your workflow for your specific needs.

\n

For more information, see \nCustomizing user pool Workflows with Lambda Triggers in the Amazon Cognito Developer Guide.

\n \n

When you use the ClientMetadata parameter, note that Amazon Cognito won't do the\n following:

\n
    \n
  • \n

    Store the ClientMetadata value. This data is available only\n to Lambda triggers that are assigned to a user pool to support custom\n workflows. If your user pool configuration doesn't include triggers, the\n ClientMetadata parameter serves no purpose.

    \n
  • \n
  • \n

    Validate the ClientMetadata value.

    \n
  • \n
  • \n

    Encrypt the ClientMetadata value. Don't send sensitive\n information in this parameter.

    \n
  • \n
\n
" } } }, @@ -13415,13 +13415,13 @@ "UserContextData": { "target": "com.amazonaws.cognitoidentityprovider#UserContextDataType", "traits": { - "smithy.api#documentation": "

Contextual data about your user session, such as the device fingerprint, IP address, or location. Amazon Cognito advanced \nsecurity evaluates the risk of an authentication event based on the context that your app generates and passes to Amazon Cognito\nwhen it makes API requests.

" + "smithy.api#documentation": "

Contextual data about your user session, such as the device fingerprint, IP address, or location. Amazon Cognito advanced \nsecurity evaluates the risk of an authentication event based on the context that your app generates and passes to Amazon Cognito\nwhen it makes API requests.

\n

For more information, see Collecting data for threat protection in\napplications.

" } }, "ClientMetadata": { "target": "com.amazonaws.cognitoidentityprovider#ClientMetadataType", "traits": { - "smithy.api#documentation": "

A map of custom key-value pairs that you can provide as input for any custom workflows\n that this action triggers.

\n

You create custom workflows by assigning Lambda functions to user pool\n triggers. When you use the RespondToAuthChallenge API action, Amazon Cognito invokes any\n functions that are assigned to the following triggers: post\n authentication, pre token generation,\n define auth challenge, create auth\n challenge, and verify auth challenge. When Amazon Cognito\n invokes any of these functions, it passes a JSON payload, which the function receives as\n input. This payload contains a clientMetadata attribute, which provides the\n data that you assigned to the ClientMetadata parameter in your RespondToAuthChallenge\n request. In your function code in Lambda, you can process the\n clientMetadata value to enhance your workflow for your specific\n needs.

\n

For more information, see \nCustomizing user pool Workflows with Lambda Triggers in the Amazon Cognito Developer Guide.

\n \n

When you use the ClientMetadata parameter, remember that Amazon Cognito won't do the\n following:

\n
    \n
  • \n

    Store the ClientMetadata value. This data is available only to Lambda\n triggers that are assigned to a user pool to support custom workflows. If\n your user pool configuration doesn't include triggers, the ClientMetadata\n parameter serves no purpose.

    \n
  • \n
  • \n

    Validate the ClientMetadata value.

    \n
  • \n
  • \n

    Encrypt the ClientMetadata value. Don't use Amazon Cognito to provide sensitive\n information.

    \n
  • \n
\n
" + "smithy.api#documentation": "

A map of custom key-value pairs that you can provide as input for any custom workflows\n that this action triggers.

\n

You create custom workflows by assigning Lambda functions to user pool\n triggers. When you use the RespondToAuthChallenge API action, Amazon Cognito invokes any\n functions that are assigned to the following triggers: post\n authentication, pre token generation,\n define auth challenge, create auth\n challenge, and verify auth challenge. When Amazon Cognito\n invokes any of these functions, it passes a JSON payload, which the function receives as\n input. This payload contains a clientMetadata attribute, which provides the\n data that you assigned to the ClientMetadata parameter in your RespondToAuthChallenge\n request. In your function code in Lambda, you can process the\n clientMetadata value to enhance your workflow for your specific\n needs.

\n

For more information, see \nCustomizing user pool Workflows with Lambda Triggers in the Amazon Cognito Developer Guide.

\n \n

When you use the ClientMetadata parameter, note that Amazon Cognito won't do the\n following:

\n
    \n
  • \n

    Store the ClientMetadata value. This data is available only\n to Lambda triggers that are assigned to a user pool to support custom\n workflows. If your user pool configuration doesn't include triggers, the\n ClientMetadata parameter serves no purpose.

    \n
  • \n
  • \n

    Validate the ClientMetadata value.

    \n
  • \n
  • \n

    Encrypt the ClientMetadata value. Don't send sensitive\n information in this parameter.

    \n
  • \n
\n
" } } }, @@ -14052,7 +14052,7 @@ "UserPoolId": { "target": "com.amazonaws.cognitoidentityprovider#UserPoolIdType", "traits": { - "smithy.api#documentation": "

The user pool ID for the user pool.

", + "smithy.api#documentation": "

The ID of the user pool.

", "smithy.api#required": {} } }, @@ -14130,7 +14130,7 @@ ], "traits": { "smithy.api#auth": [], - "smithy.api#documentation": "

Set the user's multi-factor authentication (MFA) method preference, including which\n MFA factors are activated and if any are preferred. Only one factor can be set as\n preferred. The preferred MFA factor will be used to authenticate a user if multiple\n factors are activated. If multiple options are activated and no preference is set, a\n challenge to choose an MFA option will be returned during sign-in. If an MFA type is\n activated for a user, the user will be prompted for MFA during all sign-in attempts\n unless device tracking is turned on and the device has been trusted. If you want MFA to\n be applied selectively based on the assessed risk level of sign-in attempts, deactivate\n MFA for users and turn on Adaptive Authentication for the user pool.

\n

Authorize this action with a signed-in user's access token. It must include the scope aws.cognito.signin.user.admin.

\n \n

Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For\n this operation, you can't use IAM credentials to authorize requests, and you can't\n grant IAM permissions in policies. For more information about authorization models in\n Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints.

\n
", + "smithy.api#documentation": "

Set the user's multi-factor authentication (MFA) method preference, including which\n MFA factors are activated and if any are preferred. Only one factor can be set as\n preferred. The preferred MFA factor will be used to authenticate a user if multiple\n factors are activated. If multiple options are activated and no preference is set, a\n challenge to choose an MFA option will be returned during sign-in. If an MFA type is\n activated for a user, the user will be prompted for MFA during all sign-in attempts\n unless device tracking is turned on and the device has been trusted. If you want MFA to\n be applied selectively based on the assessed risk level of sign-in attempts, deactivate\n MFA for users and turn on Adaptive Authentication for the user pool.

\n

This operation doesn't reset an existing TOTP MFA for a user. To register a new\n TOTP factor for a user, make an AssociateSoftwareToken request. For more information,\n see TOTP software token MFA.

\n

Authorize this action with a signed-in user's access token. It must include the scope aws.cognito.signin.user.admin.

\n \n

Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For\n this operation, you can't use IAM credentials to authorize requests, and you can't\n grant IAM permissions in policies. For more information about authorization models in\n Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints.

\n
", "smithy.api#optionalAuth": {} } }, @@ -14460,7 +14460,7 @@ "SecretHash": { "target": "com.amazonaws.cognitoidentityprovider#SecretHashType", "traits": { - "smithy.api#documentation": "

A keyed-hash message authentication code (HMAC) calculated using the secret key of a\n user pool client and username plus the client ID in the message.

" + "smithy.api#documentation": "

A keyed-hash message authentication code (HMAC) calculated using the secret key of a\n user pool client and username plus the client ID in the message. For more information\n about SecretHash, see Computing secret hash values.

" } }, "Username": { @@ -14497,13 +14497,13 @@ "UserContextData": { "target": "com.amazonaws.cognitoidentityprovider#UserContextDataType", "traits": { - "smithy.api#documentation": "

Contextual data about your user session, such as the device fingerprint, IP address, or location. Amazon Cognito advanced \nsecurity evaluates the risk of an authentication event based on the context that your app generates and passes to Amazon Cognito\nwhen it makes API requests.

" + "smithy.api#documentation": "

Contextual data about your user session, such as the device fingerprint, IP address, or location. Amazon Cognito advanced \nsecurity evaluates the risk of an authentication event based on the context that your app generates and passes to Amazon Cognito\nwhen it makes API requests.

\n

For more information, see Collecting data for threat protection in\napplications.

" } }, "ClientMetadata": { "target": "com.amazonaws.cognitoidentityprovider#ClientMetadataType", "traits": { - "smithy.api#documentation": "

A map of custom key-value pairs that you can provide as input for any custom workflows\n that this action triggers.

\n

You create custom workflows by assigning Lambda functions to user pool triggers.\n When you use the SignUp API action, Amazon Cognito invokes any functions that are assigned to the\n following triggers: pre sign-up, custom\n message, and post confirmation. When Amazon Cognito invokes\n any of these functions, it passes a JSON payload, which the function receives as input.\n This payload contains a clientMetadata attribute, which provides the data\n that you assigned to the ClientMetadata parameter in your SignUp request. In your\n function code in Lambda, you can process the clientMetadata value to enhance\n your workflow for your specific needs.

\n

For more information, see \nCustomizing user pool Workflows with Lambda Triggers in the Amazon Cognito Developer Guide.

\n \n

When you use the ClientMetadata parameter, remember that Amazon Cognito won't do the\n following:

\n
    \n
  • \n

    Store the ClientMetadata value. This data is available only to Lambda\n triggers that are assigned to a user pool to support custom workflows. If\n your user pool configuration doesn't include triggers, the ClientMetadata\n parameter serves no purpose.

    \n
  • \n
  • \n

    Validate the ClientMetadata value.

    \n
  • \n
  • \n

    Encrypt the ClientMetadata value. Don't use Amazon Cognito to provide sensitive\n information.

    \n
  • \n
\n
" + "smithy.api#documentation": "

A map of custom key-value pairs that you can provide as input for any custom workflows\n that this action triggers.

\n

You create custom workflows by assigning Lambda functions to user pool triggers.\n When you use the SignUp API action, Amazon Cognito invokes any functions that are assigned to the\n following triggers: pre sign-up, custom\n message, and post confirmation. When Amazon Cognito invokes\n any of these functions, it passes a JSON payload, which the function receives as input.\n This payload contains a clientMetadata attribute, which provides the data\n that you assigned to the ClientMetadata parameter in your SignUp request. In your\n function code in Lambda, you can process the clientMetadata value to enhance\n your workflow for your specific needs.

\n

For more information, see \nCustomizing user pool Workflows with Lambda Triggers in the Amazon Cognito Developer Guide.

\n \n

When you use the ClientMetadata parameter, note that Amazon Cognito won't do the\n following:

\n
    \n
  • \n

    Store the ClientMetadata value. This data is available only\n to Lambda triggers that are assigned to a user pool to support custom\n workflows. If your user pool configuration doesn't include triggers, the\n ClientMetadata parameter serves no purpose.

    \n
  • \n
  • \n

    Validate the ClientMetadata value.

    \n
  • \n
  • \n

    Encrypt the ClientMetadata value. Don't send sensitive\n information in this parameter.

    \n
  • \n
\n
" } } }, @@ -14726,7 +14726,7 @@ "UserPoolId": { "target": "com.amazonaws.cognitoidentityprovider#UserPoolIdType", "traits": { - "smithy.api#documentation": "

The user pool ID for the user pool that the users are being imported into.

", + "smithy.api#documentation": "

The ID of the user pool that the users are being imported into.

", "smithy.api#required": {} } }, @@ -14883,7 +14883,7 @@ "UserPoolId": { "target": "com.amazonaws.cognitoidentityprovider#UserPoolIdType", "traits": { - "smithy.api#documentation": "

The user pool ID for the user pool that the users are being imported into.

", + "smithy.api#documentation": "

The ID of the user pool that the users are being imported into.

", "smithy.api#required": {} } }, @@ -15547,7 +15547,7 @@ "UserPoolId": { "target": "com.amazonaws.cognitoidentityprovider#UserPoolIdType", "traits": { - "smithy.api#documentation": "

The user pool ID for the user pool.

", + "smithy.api#documentation": "

The ID of the user pool.

", "smithy.api#required": {} } }, @@ -15707,7 +15707,7 @@ } ], "traits": { - "smithy.api#documentation": "

Configures the branding settings for a user pool style. This operation is the\n programmatic option for the configuration of a style in the branding designer.

\n

Provides values for UI customization in a Settings JSON object and image\n files in an Assets array.

\n

This operation has a 2-megabyte request-size limit and include the CSS settings and\n image assets for your app client. Your branding settings might exceed 2MB in size. Amazon Cognito\n doesn't require that you pass all parameters in one request and preserves existing\n style settings that you don't specify. If your request is larger than 2MB, separate it\n into multiple requests, each with a size smaller than the limit.

\n

For more information, see API and SDK operations for managed login branding.

\n \n

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For\n this operation, you must use IAM credentials to authorize requests, and you must\n grant yourself the corresponding IAM permission in a policy.

\n

\n Learn more\n

\n \n
" + "smithy.api#documentation": "

Configures the branding settings for a user pool style. This operation is the\n programmatic option for the configuration of a style in the branding designer.

\n

Provides values for UI customization in a Settings JSON object and image\n files in an Assets array.

\n

This operation has a 2-megabyte request-size limit and include the CSS settings and\n image assets for your app client. Your branding settings might exceed 2MB in size. Amazon Cognito\n doesn't require that you pass all parameters in one request and preserves existing\n style settings that you don't specify. If your request is larger than 2MB, separate it\n into multiple requests, each with a size smaller than the limit.

\n

As a best practice, modify the output of DescribeManagedLoginBrandingByClient into the request parameters for this\n operation. To get all settings, set ReturnMergedResources to\n true. For more information, see API and SDK operations for managed login branding\n

\n \n

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For\n this operation, you must use IAM credentials to authorize requests, and you must\n grant yourself the corresponding IAM permission in a policy.

\n

\n Learn more\n

\n \n
" } }, "com.amazonaws.cognitoidentityprovider#UpdateManagedLoginBrandingRequest": { @@ -15798,7 +15798,7 @@ "UserPoolId": { "target": "com.amazonaws.cognitoidentityprovider#UserPoolIdType", "traits": { - "smithy.api#documentation": "

The user pool ID for the user pool.

", + "smithy.api#documentation": "

The ID of the user pool.

", "smithy.api#required": {} } }, @@ -15935,7 +15935,7 @@ "ClientMetadata": { "target": "com.amazonaws.cognitoidentityprovider#ClientMetadataType", "traits": { - "smithy.api#documentation": "

A map of custom key-value pairs that you can provide as input for any custom workflows\n that this action initiates.

\n

You create custom workflows by assigning Lambda functions to user pool triggers. When\n you use the UpdateUserAttributes API action, Amazon Cognito invokes the function that is assigned\n to the custom message trigger. When Amazon Cognito invokes this function, it\n passes a JSON payload, which the function receives as input. This payload contains a\n clientMetadata attribute, which provides the data that you assigned to\n the ClientMetadata parameter in your UpdateUserAttributes request. In your function code\n in Lambda, you can process the clientMetadata value to enhance your workflow\n for your specific needs.

\n

For more information, see \nCustomizing user pool Workflows with Lambda Triggers in the Amazon Cognito Developer Guide.

\n \n

When you use the ClientMetadata parameter, remember that Amazon Cognito won't do the\n following:

\n
    \n
  • \n

    Store the ClientMetadata value. This data is available only to Lambda\n triggers that are assigned to a user pool to support custom workflows. If\n your user pool configuration doesn't include triggers, the ClientMetadata\n parameter serves no purpose.

    \n
  • \n
  • \n

    Validate the ClientMetadata value.

    \n
  • \n
  • \n

    Encrypt the ClientMetadata value. Don't use Amazon Cognito to provide sensitive\n information.

    \n
  • \n
\n
" + "smithy.api#documentation": "

A map of custom key-value pairs that you can provide as input for any custom workflows\n that this action initiates.

\n

You create custom workflows by assigning Lambda functions to user pool triggers. When\n you use the UpdateUserAttributes API action, Amazon Cognito invokes the function that is assigned\n to the custom message trigger. When Amazon Cognito invokes this function, it\n passes a JSON payload, which the function receives as input. This payload contains a\n clientMetadata attribute, which provides the data that you assigned to\n the ClientMetadata parameter in your UpdateUserAttributes request. In your function code\n in Lambda, you can process the clientMetadata value to enhance your workflow\n for your specific needs.

\n

For more information, see \nCustomizing user pool Workflows with Lambda Triggers in the Amazon Cognito Developer Guide.

\n \n

When you use the ClientMetadata parameter, note that Amazon Cognito won't do the\n following:

\n
    \n
  • \n

    Store the ClientMetadata value. This data is available only\n to Lambda triggers that are assigned to a user pool to support custom\n workflows. If your user pool configuration doesn't include triggers, the\n ClientMetadata parameter serves no purpose.

    \n
  • \n
  • \n

    Validate the ClientMetadata value.

    \n
  • \n
  • \n

    Encrypt the ClientMetadata value. Don't send sensitive\n information in this parameter.

    \n
  • \n
\n
" } } }, @@ -16056,7 +16056,7 @@ "UserPoolId": { "target": "com.amazonaws.cognitoidentityprovider#UserPoolIdType", "traits": { - "smithy.api#documentation": "

The user pool ID for the user pool where you want to update the user pool\n client.

", + "smithy.api#documentation": "

The ID of the user pool where you want to update the user pool client.

", "smithy.api#required": {} } }, @@ -16119,7 +16119,7 @@ "SupportedIdentityProviders": { "target": "com.amazonaws.cognitoidentityprovider#SupportedIdentityProvidersListType", "traits": { - "smithy.api#documentation": "

A list of provider names for the identity providers (IdPs) that are supported on this\n client. The following are supported: COGNITO, Facebook,\n Google, SignInWithApple, and LoginWithAmazon.\n You can also specify the names that you configured for the SAML and OIDC IdPs in your\n user pool, for example MySAMLIdP or MyOIDCIdP.

\n

This setting applies to providers that you can access with the hosted\n UI and OAuth 2.0 authorization server. The removal of COGNITO\n from this list doesn't prevent authentication operations for local users with the\n user pools API in an Amazon Web Services SDK. The only way to prevent API-based authentication is to\n block access with a WAF rule.

" + "smithy.api#documentation": "

A list of provider names for the identity providers (IdPs) that are supported on this\n client. The following are supported: COGNITO, Facebook,\n Google, SignInWithApple, and LoginWithAmazon.\n You can also specify the names that you configured for the SAML and OIDC IdPs in your\n user pool, for example MySAMLIdP or MyOIDCIdP.

\n

This setting applies to providers that you can access with managed \n login. The removal of COGNITO\n from this list doesn't prevent authentication operations for local users with the\n user pools API in an Amazon Web Services SDK. The only way to prevent API-based authentication is to\n block access with a WAF rule.

" } }, "CallbackURLs": { @@ -16239,7 +16239,7 @@ } ], "traits": { - "smithy.api#documentation": "

Updates the Secure Sockets Layer (SSL) certificate for the custom domain for your user\n pool.

\n

You can use this operation to provide the Amazon Resource Name (ARN) of a new\n certificate to Amazon Cognito. You can't use it to change the domain for a user pool.

\n

A custom domain is used to host the Amazon Cognito hosted UI, which provides sign-up and\n sign-in pages for your application. When you set up a custom domain, you provide a\n certificate that you manage with Certificate Manager (ACM). When necessary, you can use this\n operation to change the certificate that you applied to your custom domain.

\n

Usually, this is unnecessary following routine certificate renewal with ACM. When\n you renew your existing certificate in ACM, the ARN for your certificate remains the\n same, and your custom domain uses the new certificate automatically.

\n

However, if you replace your existing certificate with a new one, ACM gives the new\n certificate a new ARN. To apply the new certificate to your custom domain, you must\n provide this ARN to Amazon Cognito.

\n

When you add your new certificate in ACM, you must choose US East (N. Virginia) as\n the Amazon Web Services Region.

\n

After you submit your request, Amazon Cognito requires up to 1 hour to distribute your new\n certificate to your custom domain.

\n

For more information about adding a custom domain to your user pool, see Using Your Own Domain for the Hosted UI.

\n \n

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For\n this operation, you must use IAM credentials to authorize requests, and you must\n grant yourself the corresponding IAM permission in a policy.

\n

\n Learn more\n

\n \n
" + "smithy.api#documentation": "

A user pool domain hosts managed login, an authorization server and web server for\n authentication in your application. This operation updates the branding version for user\n pool domains between 1 for hosted UI (classic) and 2 for\n managed login. It also updates the SSL certificate for user pool custom domains.

\n

Changes to the domain branding version take up to one minute to take effect for a\n prefix domain and up to five minutes for a custom domain.

\n

This operation doesn't change the name of your user pool domain. To change your\n domain, delete it with DeleteUserPoolDomain and create a new domain with\n CreateUserPoolDomain.

\n

You can pass the ARN of a new Certificate Manager certificate in this request. Typically, ACM\n certificates automatically renew and your user pool can continue to use the same ARN. But\n if you generate a new certificate for your custom domain name, replace the original\n configuration with the new ARN in this request.

\n

ACM certificates for custom domains must be in the US East (N. Virginia)\n Amazon Web Services Region. After you submit your request, Amazon Cognito requires up to 1 hour to distribute\n your new certificate to your custom domain.

\n

For more information about adding a custom domain to your user pool, see Configuring a user pool domain.

\n \n

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For\n this operation, you must use IAM credentials to authorize requests, and you must\n grant yourself the corresponding IAM permission in a policy.

\n

\n Learn more\n

\n \n
" } }, "com.amazonaws.cognitoidentityprovider#UpdateUserPoolDomainRequest": { @@ -16268,8 +16268,7 @@ "CustomDomainConfig": { "target": "com.amazonaws.cognitoidentityprovider#CustomDomainConfigType", "traits": { - "smithy.api#documentation": "

The configuration for a custom domain that hosts the sign-up and sign-in pages for\n your application. Use this object to specify an SSL certificate that is managed by\n ACM.

\n

When you create a custom domain, the passkey RP ID defaults to the custom domain. If\n you had a prefix domain active, this will cause passkey integration for your prefix\n domain to stop working due to a mismatch in RP ID. To keep the prefix domain passkey\n integration working, you can explicitly set RP ID to the prefix domain. Update the RP ID\n in a SetUserPoolMfaConfig request.

", - "smithy.api#required": {} + "smithy.api#documentation": "

The configuration for a custom domain that hosts the sign-up and sign-in pages for\n your application. Use this object to specify an SSL certificate that is managed by\n ACM.

\n

When you create a custom domain, the passkey RP ID defaults to the custom domain. If\n you had a prefix domain active, this will cause passkey integration for your prefix\n domain to stop working due to a mismatch in RP ID. To keep the prefix domain passkey\n integration working, you can explicitly set RP ID to the prefix domain. Update the RP ID\n in a SetUserPoolMfaConfig request.

" } } }, @@ -16305,7 +16304,7 @@ "UserPoolId": { "target": "com.amazonaws.cognitoidentityprovider#UserPoolIdType", "traits": { - "smithy.api#documentation": "

The user pool ID for the user pool you want to update.

", + "smithy.api#documentation": "

The ID of the user pool you want to update.

", "smithy.api#required": {} } }, @@ -16882,7 +16881,7 @@ "SupportedIdentityProviders": { "target": "com.amazonaws.cognitoidentityprovider#SupportedIdentityProvidersListType", "traits": { - "smithy.api#documentation": "

A list of provider names for the identity providers (IdPs) that are supported on this\n client. The following are supported: COGNITO, Facebook,\n Google, SignInWithApple, and LoginWithAmazon.\n You can also specify the names that you configured for the SAML and OIDC IdPs in your\n user pool, for example MySAMLIdP or MyOIDCIdP.

\n

This setting applies to providers that you can access with the hosted\n UI and OAuth 2.0 authorization server. The removal of COGNITO\n from this list doesn't prevent authentication operations for local users with the\n user pools API in an Amazon Web Services SDK. The only way to prevent API-based authentication is to\n block access with a WAF rule.

" + "smithy.api#documentation": "

A list of provider names for the identity providers (IdPs) that are supported on this\n client. The following are supported: COGNITO, Facebook,\n Google, SignInWithApple, and LoginWithAmazon.\n You can also specify the names that you configured for the SAML and OIDC IdPs in your\n user pool, for example MySAMLIdP or MyOIDCIdP.

\n

This setting applies to providers that you can access with managed \n login. The removal of COGNITO\n from this list doesn't prevent authentication operations for local users with the\n user pools API in an Amazon Web Services SDK. The only way to prevent API-based authentication is to\n block access with a WAF rule.

" } }, "CallbackURLs": { @@ -17880,7 +17879,7 @@ "UserVerification": { "target": "com.amazonaws.cognitoidentityprovider#UserVerificationType", "traits": { - "smithy.api#documentation": "

Sets or displays your user-pool treatment for MFA with a passkey. You can override\n other MFA options and require passkey MFA, or you can set it as preferred. When passkey\n MFA is preferred, the hosted UI encourages users to register a passkey at\n sign-in.

" + "smithy.api#documentation": "

When required, users can only register and sign in users with passkeys\n that are capable of user\n verification. When preferred, your user pool doesn't\n require the use of authenticators with user verification but encourages it.

" } } }, diff --git a/models/connect.json b/models/connect.json index fa7e3be511..b7d3ffc65e 100644 --- a/models/connect.json +++ b/models/connect.json @@ -979,6 +979,9 @@ { "target": "com.amazonaws.connect#CreateHoursOfOperation" }, + { + "target": "com.amazonaws.connect#CreateHoursOfOperationOverride" + }, { "target": "com.amazonaws.connect#CreateInstance" }, @@ -997,6 +1000,9 @@ { "target": "com.amazonaws.connect#CreatePrompt" }, + { + "target": "com.amazonaws.connect#CreatePushNotificationRegistration" + }, { "target": "com.amazonaws.connect#CreateQueue" }, @@ -1060,6 +1066,9 @@ { "target": "com.amazonaws.connect#DeleteHoursOfOperation" }, + { + "target": "com.amazonaws.connect#DeleteHoursOfOperationOverride" + }, { "target": "com.amazonaws.connect#DeleteInstance" }, @@ -1072,6 +1081,9 @@ { "target": "com.amazonaws.connect#DeletePrompt" }, + { + "target": "com.amazonaws.connect#DeletePushNotificationRegistration" + }, { "target": "com.amazonaws.connect#DeleteQueue" }, @@ -1138,6 +1150,9 @@ { "target": "com.amazonaws.connect#DescribeHoursOfOperation" }, + { + "target": "com.amazonaws.connect#DescribeHoursOfOperationOverride" + }, { "target": "com.amazonaws.connect#DescribeInstance" }, @@ -1243,6 +1258,9 @@ { "target": "com.amazonaws.connect#GetCurrentUserData" }, + { + "target": "com.amazonaws.connect#GetEffectiveHoursOfOperations" + }, { "target": "com.amazonaws.connect#GetFederationToken" }, @@ -1312,6 +1330,9 @@ { "target": "com.amazonaws.connect#ListFlowAssociations" }, + { + "target": "com.amazonaws.connect#ListHoursOfOperationOverrides" + }, { "target": "com.amazonaws.connect#ListHoursOfOperations" }, @@ -1447,6 +1468,9 @@ { "target": "com.amazonaws.connect#SearchEmailAddresses" }, + { + "target": "com.amazonaws.connect#SearchHoursOfOperationOverrides" + }, { "target": "com.amazonaws.connect#SearchHoursOfOperations" }, @@ -1597,6 +1621,9 @@ { "target": "com.amazonaws.connect#UpdateHoursOfOperation" }, + { + "target": 
"com.amazonaws.connect#UpdateHoursOfOperationOverride" + }, { "target": "com.amazonaws.connect#UpdateInstanceAttribute" }, @@ -1712,7 +1739,7 @@ "name": "connect" }, "aws.protocols#restJson1": {}, - "smithy.api#documentation": "\n

Amazon Connect is a cloud-based contact center solution that you use to set up and\n manage a customer contact center and provide reliable customer engagement at any scale.

\n

Amazon Connect provides metrics and real-time reporting that enable you to optimize\n contact routing. You can also resolve customer issues more efficiently by getting customers in\n touch with the appropriate agents.

\n

There are limits to the number of Amazon Connect resources that you can create. There\n are also limits to the number of requests that you can make per second. For more information, see\n Amazon Connect Service Quotas in the Amazon Connect Administrator\n Guide.

\n

You can use an endpoint to connect programmatically to an Amazon Web Services service. For\n a list of Amazon Connect endpoints, see Amazon Connect Endpoints.

", + "smithy.api#documentation": "\n

Amazon Connect is a cloud-based contact center solution that you use to set up and\n manage a customer contact center and provide reliable customer engagement at any scale.

\n

Amazon Connect provides metrics and real-time reporting that enable you to optimize\n contact routing. You can also resolve customer issues more efficiently by getting customers in\n touch with the appropriate agents.

\n

There are limits to the number of Amazon Connect resources that you can create. There\n are also limits to the number of requests that you can make per second. For more information, see\n Amazon Connect Service Quotas in the Amazon Connect Administrator\n Guide.

\n

You can use an endpoint to connect programmatically to an Amazon Web Services service. For a\n list of Amazon Connect endpoints, see Amazon Connect Endpoints.

", "smithy.api#title": "Amazon Connect Service", "smithy.rules#endpointRuleSet": { "version": "1.0", @@ -3092,7 +3119,7 @@ "ResourceId": { "target": "com.amazonaws.connect#ARN", "traits": { - "smithy.api#documentation": "

The identifier of the resource.

\n
    \n
  • \n

    Amazon Web Services End User Messaging SMS phone number ARN when using SMS_PHONE_NUMBER\n

    \n
  • \n
  • \n

    Amazon Web Services End User Messaging Social phone number ARN when using WHATSAPP_MESSAGING_PHONE_NUMBER\n

    \n
  • \n
", + "smithy.api#documentation": "

The identifier of the resource.

\n
    \n
  • \n

    Amazon Web Services End User Messaging SMS phone number ARN when using\n SMS_PHONE_NUMBER\n

    \n
  • \n
  • \n

    Amazon Web Services End User Messaging Social phone number ARN when using\n WHATSAPP_MESSAGING_PHONE_NUMBER\n

    \n
  • \n
", "smithy.api#required": {} } }, @@ -4706,7 +4733,7 @@ "ResourceIds": { "target": "com.amazonaws.connect#resourceArnListMaxLimit100", "traits": { - "smithy.api#documentation": "

A list of resource identifiers to retrieve flow associations.

\n
    \n
  • \n

    Amazon Web Services End User Messaging SMS phone number ARN when using SMS_PHONE_NUMBER\n

    \n
  • \n
  • \n

    Amazon Web Services End User Messaging Social phone number ARN when using WHATSAPP_MESSAGING_PHONE_NUMBER\n

    \n
  • \n
", + "smithy.api#documentation": "

A list of resource identifiers to retrieve flow associations.

\n
    \n
  • \n

    Amazon Web Services End User Messaging SMS phone number ARN when using\n SMS_PHONE_NUMBER\n

    \n
  • \n
  • \n

    Amazon Web Services End User Messaging Social phone number ARN when using\n WHATSAPP_MESSAGING_PHONE_NUMBER\n

    \n
  • \n
", "smithy.api#required": {} } }, @@ -5266,7 +5293,7 @@ "SourcePhoneNumberArn": { "target": "com.amazonaws.connect#ARN", "traits": { - "smithy.api#documentation": "

The claimed phone number ARN that was previously imported from the external service, such as\n Amazon Web Services End User Messaging. If it is from Amazon Web Services End User Messaging, it looks like the ARN of the phone number\n that was imported from Amazon Web Services End User Messaging.

" + "smithy.api#documentation": "

The claimed phone number ARN that was previously imported from the external service, such as\n Amazon Web Services End User Messaging. If it is from Amazon Web Services End User Messaging, it\n looks like the ARN of the phone number that was imported from Amazon Web Services End User\n Messaging.

" } } }, @@ -5303,6 +5330,18 @@ "target": "com.amazonaws.connect#CommonAttributeAndCondition" } }, + "com.amazonaws.connect#CommonHumanReadableDescription": { + "type": "string", + "traits": { + "smithy.api#pattern": "^[\\P{C}\\r\\n\\t]{1,250}$" + } + }, + "com.amazonaws.connect#CommonHumanReadableName": { + "type": "string", + "traits": { + "smithy.api#pattern": "^[\\P{C}\\r\\n\\t]{1,127}$" + } + }, "com.amazonaws.connect#CommonNameLength127": { "type": "string", "traits": { @@ -5444,7 +5483,7 @@ } }, "traits": { - "smithy.api#documentation": "

A conditional check failed.

", + "smithy.api#documentation": "

Request processing failed because dependent condition failed.

", "smithy.api#error": "client", "smithy.api#httpError": 409 } @@ -5723,6 +5762,34 @@ "smithy.api#documentation": "

A structure that defines search criteria for contacts using analysis outputs from Amazon Connect Contact Lens.

" } }, + "com.amazonaws.connect#ContactConfiguration": { + "type": "structure", + "members": { + "ContactId": { + "target": "com.amazonaws.connect#ContactId", + "traits": { + "smithy.api#documentation": "

The identifier of the contact within the Amazon Connect instance.

", + "smithy.api#required": {} + } + }, + "ParticipantRole": { + "target": "com.amazonaws.connect#ParticipantRole", + "traits": { + "smithy.api#documentation": "

The role of the participant in the chat conversation.

\n \n

Only CUSTOMER is currently supported. Any other values other than\n CUSTOMER will result in an exception (4xx error).

\n
" + } + }, + "IncludeRawMessage": { + "target": "com.amazonaws.connect#IncludeRawMessage", + "traits": { + "smithy.api#default": false, + "smithy.api#documentation": "

Whether to include the raw Connect message in the push notification payload. Default is\n False.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The contact configuration for push notification registration.

" + } + }, "com.amazonaws.connect#ContactDataRequest": { "type": "structure", "members": { @@ -6017,6 +6084,18 @@ }, "StringCondition": { "target": "com.amazonaws.connect#StringCondition" + }, + "StateCondition": { + "target": "com.amazonaws.connect#ContactFlowModuleState", + "traits": { + "smithy.api#documentation": "

The state of the flow.

" + } + }, + "StatusCondition": { + "target": "com.amazonaws.connect#ContactFlowModuleStatus", + "traits": { + "smithy.api#documentation": "

The status of the flow.

" + } } }, "traits": { @@ -7782,6 +7861,118 @@ } } }, + "com.amazonaws.connect#CreateHoursOfOperationOverride": { + "type": "operation", + "input": { + "target": "com.amazonaws.connect#CreateHoursOfOperationOverrideRequest" + }, + "output": { + "target": "com.amazonaws.connect#CreateHoursOfOperationOverrideResponse" + }, + "errors": [ + { + "target": "com.amazonaws.connect#DuplicateResourceException" + }, + { + "target": "com.amazonaws.connect#InternalServiceException" + }, + { + "target": "com.amazonaws.connect#InvalidParameterException" + }, + { + "target": "com.amazonaws.connect#InvalidRequestException" + }, + { + "target": "com.amazonaws.connect#LimitExceededException" + }, + { + "target": "com.amazonaws.connect#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.connect#ThrottlingException" + } + ], + "traits": { + "smithy.api#documentation": "

Creates an hours of operation override in an Amazon Connect hours of operation\n resource.

", + "smithy.api#http": { + "method": "PUT", + "uri": "/hours-of-operations/{InstanceId}/{HoursOfOperationId}/overrides", + "code": 200 + } + } + }, + "com.amazonaws.connect#CreateHoursOfOperationOverrideRequest": { + "type": "structure", + "members": { + "InstanceId": { + "target": "com.amazonaws.connect#InstanceId", + "traits": { + "smithy.api#documentation": "

The identifier of the Amazon Connect instance.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "HoursOfOperationId": { + "target": "com.amazonaws.connect#HoursOfOperationId", + "traits": { + "smithy.api#documentation": "

The identifier for the hours of operation.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "Name": { + "target": "com.amazonaws.connect#CommonHumanReadableName", + "traits": { + "smithy.api#documentation": "

The name of the hours of operation override.

", + "smithy.api#required": {} + } + }, + "Description": { + "target": "com.amazonaws.connect#CommonHumanReadableDescription", + "traits": { + "smithy.api#documentation": "

The description of the hours of operation override.

" + } + }, + "Config": { + "target": "com.amazonaws.connect#HoursOfOperationOverrideConfigList", + "traits": { + "smithy.api#documentation": "

Configuration information for the hours of operation override: day, start time, and end\n time.

", + "smithy.api#required": {} + } + }, + "EffectiveFrom": { + "target": "com.amazonaws.connect#HoursOfOperationOverrideYearMonthDayDateFormat", + "traits": { + "smithy.api#documentation": "

The date from when the hours of operation override would be effective.

", + "smithy.api#required": {} + } + }, + "EffectiveTill": { + "target": "com.amazonaws.connect#HoursOfOperationOverrideYearMonthDayDateFormat", + "traits": { + "smithy.api#documentation": "

The date until when the hours of operation override would be effective.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.connect#CreateHoursOfOperationOverrideResponse": { + "type": "structure", + "members": { + "HoursOfOperationOverrideId": { + "target": "com.amazonaws.connect#HoursOfOperationOverrideId", + "traits": { + "smithy.api#documentation": "

The identifier for the hours of operation override.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.connect#CreateHoursOfOperationRequest": { "type": "structure", "members": { @@ -8013,7 +8204,7 @@ "IntegrationArn": { "target": "com.amazonaws.connect#ARN", "traits": { - "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the integration.

\n \n

When integrating with Amazon Web Services End User Messaging, the Amazon Connect and Amazon Web Services End User Messaging\n instances must be in the same account.

\n
", + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the integration.

\n \n

When integrating with Amazon Web Services End User Messaging, the Amazon Connect and\n Amazon Web Services End User Messaging instances must be in the same account.

\n
", "smithy.api#required": {} } }, @@ -8420,6 +8611,109 @@ "smithy.api#output": {} } }, + "com.amazonaws.connect#CreatePushNotificationRegistration": { + "type": "operation", + "input": { + "target": "com.amazonaws.connect#CreatePushNotificationRegistrationRequest" + }, + "output": { + "target": "com.amazonaws.connect#CreatePushNotificationRegistrationResponse" + }, + "errors": [ + { + "target": "com.amazonaws.connect#AccessDeniedException" + }, + { + "target": "com.amazonaws.connect#InternalServiceException" + }, + { + "target": "com.amazonaws.connect#InvalidParameterException" + }, + { + "target": "com.amazonaws.connect#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.connect#ServiceQuotaExceededException" + }, + { + "target": "com.amazonaws.connect#ThrottlingException" + } + ], + "traits": { + "smithy.api#documentation": "

Creates registration for a device token and a chat contact to receive real-time push\n notifications. For more information about push notifications, see Set up push\n notifications in Amazon Connect for mobile chat in the Amazon Connect\n Administrator Guide.

", + "smithy.api#http": { + "method": "PUT", + "uri": "/push-notification/{InstanceId}/registrations", + "code": 200 + } + } + }, + "com.amazonaws.connect#CreatePushNotificationRegistrationRequest": { + "type": "structure", + "members": { + "InstanceId": { + "target": "com.amazonaws.connect#InstanceId", + "traits": { + "smithy.api#documentation": "

The identifier of the Amazon Connect instance. You can find the instance ID in the\n Amazon Resource Name (ARN) of the instance.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "ClientToken": { + "target": "com.amazonaws.connect#ClientToken", + "traits": { + "smithy.api#documentation": "

A unique, case-sensitive identifier that you provide to ensure the idempotency of the\n request. If not provided, the Amazon Web Services\n SDK populates this field. For more information about idempotency, see\n Making retries safe with idempotent APIs.

", + "smithy.api#idempotencyToken": {} + } + }, + "PinpointAppArn": { + "target": "com.amazonaws.connect#ARN", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the Pinpoint application.

", + "smithy.api#required": {} + } + }, + "DeviceToken": { + "target": "com.amazonaws.connect#DeviceToken", + "traits": { + "smithy.api#documentation": "

The push notification token issued by the Apple or Google gateways.

", + "smithy.api#required": {} + } + }, + "DeviceType": { + "target": "com.amazonaws.connect#DeviceType", + "traits": { + "smithy.api#documentation": "

The device type to use when sending the message.

", + "smithy.api#required": {} + } + }, + "ContactConfiguration": { + "target": "com.amazonaws.connect#ContactConfiguration", + "traits": { + "smithy.api#documentation": "

The contact configuration for push notification registration.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.connect#CreatePushNotificationRegistrationResponse": { + "type": "structure", + "members": { + "RegistrationId": { + "target": "com.amazonaws.connect#RegistrationId", + "traits": { + "smithy.api#documentation": "

The identifier for the registration.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.connect#CreateQueue": { "type": "operation", "input": { @@ -8452,7 +8746,7 @@ } ], "traits": { - "smithy.api#documentation": "

This API is in preview release for Amazon Connect and is subject to change.

\n

Creates a new queue for the specified Amazon Connect instance.

\n \n
    \n
  • \n

    If the phone number is claimed to a traffic distribution group that was created in the\n same Region as the Amazon Connect instance where you are calling this API, then you can use a\n full phone number ARN or a UUID for OutboundCallerIdNumberId. However, if the phone number is claimed\n to a traffic distribution group that is in one Region, and you are calling this API from an instance in another Amazon Web Services Region that is associated with the traffic distribution group, you must provide a full phone number ARN. If a\n UUID is provided in this scenario, you will receive a\n ResourceNotFoundException.

    \n
  • \n
  • \n

    Only use the phone number ARN format that doesn't contain instance in the\n path, for example, arn:aws:connect:us-east-1:1234567890:phone-number/uuid. This\n is the same ARN format that is returned when you call the ListPhoneNumbersV2\n API.

    \n
  • \n
  • \n

    If you plan to use IAM policies to allow/deny access to this API for phone\n number resources claimed to a traffic distribution group, see Allow or Deny queue API actions for phone numbers in a replica Region.

    \n
  • \n
\n
", + "smithy.api#documentation": "

Creates a new queue for the specified Amazon Connect instance.

\n \n
    \n
  • \n

    If the phone number is claimed to a traffic distribution group that was created in the\n same Region as the Amazon Connect instance where you are calling this API, then you can use a\n full phone number ARN or a UUID for OutboundCallerIdNumberId. However, if the phone number is claimed\n to a traffic distribution group that is in one Region, and you are calling this API from an instance in another Amazon Web Services Region that is associated with the traffic distribution group, you must provide a full phone number ARN. If a\n UUID is provided in this scenario, you will receive a\n ResourceNotFoundException.

    \n
  • \n
  • \n

    Only use the phone number ARN format that doesn't contain instance in the\n path, for example, arn:aws:connect:us-east-1:1234567890:phone-number/uuid. This\n is the same ARN format that is returned when you call the ListPhoneNumbersV2\n API.

    \n
  • \n
  • \n

    If you plan to use IAM policies to allow/deny access to this API for phone\n number resources claimed to a traffic distribution group, see Allow or Deny queue API actions for phone numbers in a replica Region.

    \n
  • \n
\n
", "smithy.api#http": { "method": "PUT", "uri": "/queues/{InstanceId}", @@ -10247,6 +10541,67 @@ "target": "com.amazonaws.connect#DataSetId" } }, + "com.amazonaws.connect#DateComparisonType": { + "type": "enum", + "members": { + "GREATER_THAN": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "GREATER_THAN" + } + }, + "LESS_THAN": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "LESS_THAN" + } + }, + "GREATER_THAN_OR_EQUAL_TO": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "GREATER_THAN_OR_EQUAL_TO" + } + }, + "LESS_THAN_OR_EQUAL_TO": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "LESS_THAN_OR_EQUAL_TO" + } + }, + "EQUAL_TO": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "EQUAL_TO" + } + } + } + }, + "com.amazonaws.connect#DateCondition": { + "type": "structure", + "members": { + "FieldName": { + "target": "com.amazonaws.connect#String", + "traits": { + "smithy.api#documentation": "

An object to specify the hours of operation override date field.

" + } + }, + "Value": { + "target": "com.amazonaws.connect#DateYearMonthDayFormat", + "traits": { + "smithy.api#documentation": "

An object to specify the hours of operation override date value.

" + } + }, + "ComparisonType": { + "target": "com.amazonaws.connect#DateComparisonType", + "traits": { + "smithy.api#documentation": "

An object to specify the hours of operation override date condition\n comparisonType.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

An object to specify the hours of operation override date condition.

" + } + }, "com.amazonaws.connect#DateReference": { "type": "structure", "members": { @@ -10267,6 +10622,12 @@ "smithy.api#documentation": "

Information about a reference when the referenceType is DATE.\n Otherwise, null.

" } }, + "com.amazonaws.connect#DateYearMonthDayFormat": { + "type": "string", + "traits": { + "smithy.api#pattern": "^\\d{4}-\\d{2}-\\d{2}$" + } + }, "com.amazonaws.connect#DeactivateEvaluationForm": { "type": "operation", "input": { @@ -10857,6 +11218,72 @@ } } }, + "com.amazonaws.connect#DeleteHoursOfOperationOverride": { + "type": "operation", + "input": { + "target": "com.amazonaws.connect#DeleteHoursOfOperationOverrideRequest" + }, + "output": { + "target": "smithy.api#Unit" + }, + "errors": [ + { + "target": "com.amazonaws.connect#InternalServiceException" + }, + { + "target": "com.amazonaws.connect#InvalidParameterException" + }, + { + "target": "com.amazonaws.connect#InvalidRequestException" + }, + { + "target": "com.amazonaws.connect#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.connect#ThrottlingException" + } + ], + "traits": { + "smithy.api#documentation": "

Deletes an hours of operation override in an Amazon Connect hours of operation\n resource.

", + "smithy.api#http": { + "method": "DELETE", + "uri": "/hours-of-operations/{InstanceId}/{HoursOfOperationId}/overrides/{HoursOfOperationOverrideId}", + "code": 200 + } + } + }, + "com.amazonaws.connect#DeleteHoursOfOperationOverrideRequest": { + "type": "structure", + "members": { + "InstanceId": { + "target": "com.amazonaws.connect#InstanceId", + "traits": { + "smithy.api#documentation": "

The identifier of the Amazon Connect instance.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "HoursOfOperationId": { + "target": "com.amazonaws.connect#HoursOfOperationId", + "traits": { + "smithy.api#documentation": "

The identifier for the hours of operation.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "HoursOfOperationOverrideId": { + "target": "com.amazonaws.connect#HoursOfOperationOverrideId", + "traits": { + "smithy.api#documentation": "

The identifier for the hours of operation override.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, "com.amazonaws.connect#DeleteHoursOfOperationRequest": { "type": "structure", "members": { @@ -11100,6 +11527,79 @@ "smithy.api#input": {} } }, + "com.amazonaws.connect#DeletePushNotificationRegistration": { + "type": "operation", + "input": { + "target": "com.amazonaws.connect#DeletePushNotificationRegistrationRequest" + }, + "output": { + "target": "com.amazonaws.connect#DeletePushNotificationRegistrationResponse" + }, + "errors": [ + { + "target": "com.amazonaws.connect#AccessDeniedException" + }, + { + "target": "com.amazonaws.connect#InternalServiceException" + }, + { + "target": "com.amazonaws.connect#InvalidParameterException" + }, + { + "target": "com.amazonaws.connect#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.connect#ThrottlingException" + } + ], + "traits": { + "smithy.api#documentation": "

Deletes registration for a device token and a chat contact.

", + "smithy.api#http": { + "method": "DELETE", + "uri": "/push-notification/{InstanceId}/registrations/{RegistrationId}", + "code": 200 + } + } + }, + "com.amazonaws.connect#DeletePushNotificationRegistrationRequest": { + "type": "structure", + "members": { + "InstanceId": { + "target": "com.amazonaws.connect#InstanceId", + "traits": { + "smithy.api#documentation": "

The identifier of the Amazon Connect instance. You can find the instance ID in the\n Amazon Resource Name (ARN) of the instance.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "RegistrationId": { + "target": "com.amazonaws.connect#RegistrationId", + "traits": { + "smithy.api#documentation": "

The identifier for the registration.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "ContactId": { + "target": "com.amazonaws.connect#ContactId", + "traits": { + "smithy.api#documentation": "

The identifier of the contact within the Amazon Connect instance.

", + "smithy.api#httpQuery": "contactId", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.connect#DeletePushNotificationRegistrationResponse": { + "type": "structure", + "members": {}, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.connect#DeleteQueue": { "type": "operation", "input": { @@ -12619,6 +13119,86 @@ } } }, + "com.amazonaws.connect#DescribeHoursOfOperationOverride": { + "type": "operation", + "input": { + "target": "com.amazonaws.connect#DescribeHoursOfOperationOverrideRequest" + }, + "output": { + "target": "com.amazonaws.connect#DescribeHoursOfOperationOverrideResponse" + }, + "errors": [ + { + "target": "com.amazonaws.connect#InternalServiceException" + }, + { + "target": "com.amazonaws.connect#InvalidParameterException" + }, + { + "target": "com.amazonaws.connect#InvalidRequestException" + }, + { + "target": "com.amazonaws.connect#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.connect#ThrottlingException" + } + ], + "traits": { + "smithy.api#documentation": "

Describes the hours of operation override.

", + "smithy.api#http": { + "method": "GET", + "uri": "/hours-of-operations/{InstanceId}/{HoursOfOperationId}/overrides/{HoursOfOperationOverrideId}", + "code": 200 + } + } + }, + "com.amazonaws.connect#DescribeHoursOfOperationOverrideRequest": { + "type": "structure", + "members": { + "InstanceId": { + "target": "com.amazonaws.connect#InstanceId", + "traits": { + "smithy.api#documentation": "

The identifier of the Amazon Connect instance.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "HoursOfOperationId": { + "target": "com.amazonaws.connect#HoursOfOperationId", + "traits": { + "smithy.api#documentation": "

The identifier for the hours of operation.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "HoursOfOperationOverrideId": { + "target": "com.amazonaws.connect#HoursOfOperationOverrideId", + "traits": { + "smithy.api#documentation": "

The identifier for the hours of operation override.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.connect#DescribeHoursOfOperationOverrideResponse": { + "type": "structure", + "members": { + "HoursOfOperationOverride": { + "target": "com.amazonaws.connect#HoursOfOperationOverride", + "traits": { + "smithy.api#documentation": "

Information about the hours of operations override.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.connect#DescribeHoursOfOperationRequest": { "type": "structure", "members": { @@ -12899,7 +13479,7 @@ } ], "traits": { - "smithy.api#documentation": "

Gets details and status of a phone number that’s claimed to your Amazon Connect instance\n or traffic distribution group.

\n \n

If the number is claimed to a traffic distribution group, and you are calling in the Amazon Web Services Region\n where the traffic distribution group was created, you can use either a phone number ARN or UUID value for the\n PhoneNumberId URI request parameter. However, if the number is claimed to a traffic distribution group\n and you are calling this API in the alternate Amazon Web Services Region associated with the\n traffic distribution group, you must provide a full phone number ARN. If a UUID is provided\n in\n this scenario, you receive a\n ResourceNotFoundException.

\n
", + "smithy.api#documentation": "

Gets details and status of a phone number that’s claimed to your Amazon Connect instance\n or traffic distribution group.

\n \n

If the number is claimed to a traffic distribution group, and you are calling in the Amazon Web Services Region\n where the traffic distribution group was created, you can use either a phone number ARN or UUID value for the\n PhoneNumberId URI request parameter. However, if the number is claimed to a traffic distribution group\n and you are calling this API in the alternate Amazon Web Services Region associated with the\n traffic distribution group, you must provide a full phone number ARN. If a UUID is provided\n in\n this scenario, you receive a ResourceNotFoundException.

\n
", "smithy.api#http": { "method": "GET", "uri": "/phone-number/{PhoneNumberId}", @@ -13937,6 +14517,38 @@ "smithy.api#documentation": "

Information regarding the device.

" } }, + "com.amazonaws.connect#DeviceToken": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 500 + } + } + }, + "com.amazonaws.connect#DeviceType": { + "type": "enum", + "members": { + "GCM": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "GCM" + } + }, + "APNS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "APNS" + } + }, + "APNS_SANDBOX": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "APNS_SANDBOX" + } + } + } + }, "com.amazonaws.connect#Dimensions": { "type": "structure", "members": { @@ -14256,7 +14868,7 @@ "ResourceId": { "target": "com.amazonaws.connect#ARN", "traits": { - "smithy.api#documentation": "

The identifier of the resource.

\n
    \n
  • \n

    Amazon Web Services End User Messaging SMS phone number ARN when using SMS_PHONE_NUMBER\n

    \n
  • \n
  • \n

    Amazon Web Services End User Messaging Social phone number ARN when using WHATSAPP_MESSAGING_PHONE_NUMBER\n

    \n
  • \n
", + "smithy.api#documentation": "

The identifier of the resource.

\n
    \n
  • \n

    Amazon Web Services End User Messaging SMS phone number ARN when using\n SMS_PHONE_NUMBER\n

    \n
  • \n
  • \n

    Amazon Web Services End User Messaging Social phone number ARN when using\n WHATSAPP_MESSAGING_PHONE_NUMBER\n

    \n
  • \n
", "smithy.api#httpLabel": {}, "smithy.api#required": {} } @@ -15042,6 +15654,32 @@ "com.amazonaws.connect#DurationInSeconds": { "type": "integer" }, + "com.amazonaws.connect#EffectiveHoursOfOperationList": { + "type": "list", + "member": { + "target": "com.amazonaws.connect#EffectiveHoursOfOperations" + } + }, + "com.amazonaws.connect#EffectiveHoursOfOperations": { + "type": "structure", + "members": { + "Date": { + "target": "com.amazonaws.connect#HoursOfOperationOverrideYearMonthDayDateFormat", + "traits": { + "smithy.api#documentation": "

The date that the hours of operation or overrides applies to.

" + } + }, + "OperationalHours": { + "target": "com.amazonaws.connect#OperationalHours", + "traits": { + "smithy.api#documentation": "

Information about the hours of operations with the effective override applied.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Information about the hours of operations with the effective override applied.

" + } + }, "com.amazonaws.connect#Email": { "type": "string", "traits": { @@ -17868,6 +18506,100 @@ "smithy.api#output": {} } }, + "com.amazonaws.connect#GetEffectiveHoursOfOperations": { + "type": "operation", + "input": { + "target": "com.amazonaws.connect#GetEffectiveHoursOfOperationsRequest" + }, + "output": { + "target": "com.amazonaws.connect#GetEffectiveHoursOfOperationsResponse" + }, + "errors": [ + { + "target": "com.amazonaws.connect#InternalServiceException" + }, + { + "target": "com.amazonaws.connect#InvalidParameterException" + }, + { + "target": "com.amazonaws.connect#InvalidRequestException" + }, + { + "target": "com.amazonaws.connect#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.connect#ThrottlingException" + } + ], + "traits": { + "smithy.api#documentation": "

Get the hours of operations with the effective override applied.

", + "smithy.api#http": { + "method": "GET", + "uri": "/effective-hours-of-operations/{InstanceId}/{HoursOfOperationId}", + "code": 200 + } + } + }, + "com.amazonaws.connect#GetEffectiveHoursOfOperationsRequest": { + "type": "structure", + "members": { + "InstanceId": { + "target": "com.amazonaws.connect#InstanceId", + "traits": { + "smithy.api#documentation": "

The identifier of the Amazon Connect instance.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "HoursOfOperationId": { + "target": "com.amazonaws.connect#HoursOfOperationId", + "traits": { + "smithy.api#documentation": "

The identifier for the hours of operation.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "FromDate": { + "target": "com.amazonaws.connect#HoursOfOperationOverrideYearMonthDayDateFormat", + "traits": { + "smithy.api#documentation": "

The date from when the hours of operation are listed.

", + "smithy.api#httpQuery": "fromDate", + "smithy.api#required": {} + } + }, + "ToDate": { + "target": "com.amazonaws.connect#HoursOfOperationOverrideYearMonthDayDateFormat", + "traits": { + "smithy.api#documentation": "

The date until when the hours of operation are listed.

", + "smithy.api#httpQuery": "toDate", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.connect#GetEffectiveHoursOfOperationsResponse": { + "type": "structure", + "members": { + "EffectiveHoursOfOperationList": { + "target": "com.amazonaws.connect#EffectiveHoursOfOperationList", + "traits": { + "smithy.api#documentation": "

Information about the effective hours of operations.

" + } + }, + "TimeZone": { + "target": "com.amazonaws.connect#TimeZone", + "traits": { + "smithy.api#documentation": "

The time zone for the hours of operation.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.connect#GetFederationToken": { "type": "operation", "input": { @@ -18004,7 +18736,7 @@ "ResourceId": { "target": "com.amazonaws.connect#ARN", "traits": { - "smithy.api#documentation": "

The identifier of the resource.

\n
    \n
  • \n

    Amazon Web Services End User Messaging SMS phone number ARN when using SMS_PHONE_NUMBER\n

    \n
  • \n
  • \n

    Amazon Web Services End User Messaging Social phone number ARN when using WHATSAPP_MESSAGING_PHONE_NUMBER\n

    \n
  • \n
", + "smithy.api#documentation": "

The identifier of the resource.

\n
    \n
  • \n

    Amazon Web Services End User Messaging SMS phone number ARN when using\n SMS_PHONE_NUMBER\n

    \n
  • \n
  • \n

    Amazon Web Services End User Messaging Social phone number ARN when using\n WHATSAPP_MESSAGING_PHONE_NUMBER\n

    \n
  • \n
", "smithy.api#httpLabel": {}, "smithy.api#required": {} } @@ -19570,6 +20302,156 @@ "com.amazonaws.connect#HoursOfOperationName": { "type": "string" }, + "com.amazonaws.connect#HoursOfOperationOverride": { + "type": "structure", + "members": { + "HoursOfOperationOverrideId": { + "target": "com.amazonaws.connect#HoursOfOperationOverrideId", + "traits": { + "smithy.api#documentation": "

The identifier for the hours of operation override.

" + } + }, + "HoursOfOperationId": { + "target": "com.amazonaws.connect#HoursOfOperationId", + "traits": { + "smithy.api#documentation": "

The identifier for the hours of operation.

" + } + }, + "HoursOfOperationArn": { + "target": "com.amazonaws.connect#ARN", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) for the hours of operation.

" + } + }, + "Name": { + "target": "com.amazonaws.connect#CommonHumanReadableName", + "traits": { + "smithy.api#documentation": "

The name of the hours of operation override.

" + } + }, + "Description": { + "target": "com.amazonaws.connect#CommonHumanReadableDescription", + "traits": { + "smithy.api#documentation": "

The description of the hours of operation override.

" + } + }, + "Config": { + "target": "com.amazonaws.connect#HoursOfOperationOverrideConfigList", + "traits": { + "smithy.api#documentation": "

Configuration information for the hours of operation override: day, start time, and end\n time.

" + } + }, + "EffectiveFrom": { + "target": "com.amazonaws.connect#HoursOfOperationOverrideYearMonthDayDateFormat", + "traits": { + "smithy.api#documentation": "

The date from which the hours of operation override would be effective.

" + } + }, + "EffectiveTill": { + "target": "com.amazonaws.connect#HoursOfOperationOverrideYearMonthDayDateFormat", + "traits": { + "smithy.api#documentation": "

The date till which the hours of operation override would be effective.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Information about the hours of operations override.

" + } + }, + "com.amazonaws.connect#HoursOfOperationOverrideConfig": { + "type": "structure", + "members": { + "Day": { + "target": "com.amazonaws.connect#OverrideDays", + "traits": { + "smithy.api#documentation": "

The day that the hours of operation override applies to.

" + } + }, + "StartTime": { + "target": "com.amazonaws.connect#OverrideTimeSlice", + "traits": { + "smithy.api#documentation": "

The start time when your contact center opens if overrides are applied.

" + } + }, + "EndTime": { + "target": "com.amazonaws.connect#OverrideTimeSlice", + "traits": { + "smithy.api#documentation": "

The end time that your contact center closes if overrides are applied.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Information about the hours of operation override config: day, start time, and end\n time.

" + } + }, + "com.amazonaws.connect#HoursOfOperationOverrideConfigList": { + "type": "list", + "member": { + "target": "com.amazonaws.connect#HoursOfOperationOverrideConfig" + }, + "traits": { + "smithy.api#length": { + "min": 0, + "max": 100 + } + } + }, + "com.amazonaws.connect#HoursOfOperationOverrideId": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 36 + } + } + }, + "com.amazonaws.connect#HoursOfOperationOverrideList": { + "type": "list", + "member": { + "target": "com.amazonaws.connect#HoursOfOperationOverride" + } + }, + "com.amazonaws.connect#HoursOfOperationOverrideSearchConditionList": { + "type": "list", + "member": { + "target": "com.amazonaws.connect#HoursOfOperationOverrideSearchCriteria" + } + }, + "com.amazonaws.connect#HoursOfOperationOverrideSearchCriteria": { + "type": "structure", + "members": { + "OrConditions": { + "target": "com.amazonaws.connect#HoursOfOperationOverrideSearchConditionList", + "traits": { + "smithy.api#documentation": "

A list of conditions which would be applied together with an OR condition.

" + } + }, + "AndConditions": { + "target": "com.amazonaws.connect#HoursOfOperationOverrideSearchConditionList", + "traits": { + "smithy.api#documentation": "

A list of conditions which would be applied together with an AND condition.

" + } + }, + "StringCondition": { + "target": "com.amazonaws.connect#StringCondition" + }, + "DateCondition": { + "target": "com.amazonaws.connect#DateCondition", + "traits": { + "smithy.api#documentation": "

A leaf node condition which can be used to specify a date condition.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The search criteria to be used to return hours of operations overrides.

" + } + }, + "com.amazonaws.connect#HoursOfOperationOverrideYearMonthDayDateFormat": { + "type": "string", + "traits": { + "smithy.api#pattern": "^\\d{4}-\\d{2}-\\d{2}$" + } + }, "com.amazonaws.connect#HoursOfOperationSearchConditionList": { "type": "list", "member": { @@ -19726,7 +20608,7 @@ } ], "traits": { - "smithy.api#documentation": "

Imports a claimed phone number from an external service, such as Amazon Web Services End User Messaging, into an\n Amazon Connect instance. You can call this API only in the same Amazon Web Services Region\n where the Amazon Connect instance was created.

\n \n

Call the DescribePhoneNumber API\n to verify the status of a previous ImportPhoneNumber operation.

\n
\n

If you plan to claim or import numbers and then release numbers frequently, contact us for a\n service quota exception. Otherwise, it is possible you will be blocked from claiming and\n releasing any more numbers until up to 180 days past the oldest number released has expired.

\n

By default you can claim or import and then release up to 200% of your maximum number of\n active phone numbers. If you claim or import and then release phone numbers using the UI or API\n during a rolling 180 day cycle that exceeds 200% of your phone number service level quota, you\n will be blocked from claiming or importing any more numbers until 180 days past the oldest number\n released has expired.

\n

For example, if you already have 99 claimed or imported numbers and a service level quota of\n 99 phone numbers, and in any 180 day period you release 99, claim 99, and then release 99, you\n will have exceeded the 200% limit. At that point you are blocked from claiming any more numbers\n until you open an Amazon Web Services Support ticket.

", + "smithy.api#documentation": "

Imports a claimed phone number from an external service, such as Amazon Web Services End User\n Messaging, into an Amazon Connect instance. You can call this API only in the same Amazon Web Services Region where the Amazon Connect instance was created.

\n \n

Call the DescribePhoneNumber API\n to verify the status of a previous ImportPhoneNumber operation.

\n
\n

If you plan to claim or import numbers and then release numbers frequently, contact us for a\n service quota exception. Otherwise, it is possible you will be blocked from claiming and\n releasing any more numbers until up to 180 days past the oldest number released has expired.

\n

By default you can claim or import and then release up to 200% of your maximum number of\n active phone numbers. If you claim or import and then release phone numbers using the UI or API\n during a rolling 180 day cycle that exceeds 200% of your phone number service level quota, you\n will be blocked from claiming or importing any more numbers until 180 days past the oldest number\n released has expired.

\n

For example, if you already have 99 claimed or imported numbers and a service level quota of\n 99 phone numbers, and in any 180 day period you release 99, claim 99, and then release 99, you\n will have exceeded the 200% limit. At that point you are blocked from claiming any more numbers\n until you open an Amazon Web Services Support ticket.

", "smithy.api#http": { "method": "POST", "uri": "/phone-number/import", @@ -19747,7 +20629,7 @@ "SourcePhoneNumberArn": { "target": "com.amazonaws.connect#ARN", "traits": { - "smithy.api#documentation": "

The claimed phone number ARN being imported from the external service, such as Amazon Web Services End User Messaging. If it is from Amazon Web Services End User Messaging, it looks like the ARN of the phone number to\n import from Amazon Web Services End User Messaging.

", + "smithy.api#documentation": "

The claimed phone number ARN being imported from the external service, such as Amazon Web Services End User Messaging. If it is from Amazon Web Services End User Messaging, it looks\n like the ARN of the phone number to import from Amazon Web Services End User Messaging.

", "smithy.api#required": {} } }, @@ -19895,6 +20777,12 @@ "smithy.api#sensitive": {} } }, + "com.amazonaws.connect#IncludeRawMessage": { + "type": "boolean", + "traits": { + "smithy.api#default": false + } + }, "com.amazonaws.connect#Index": { "type": "integer" }, @@ -22430,6 +23318,116 @@ "smithy.api#output": {} } }, + "com.amazonaws.connect#ListHoursOfOperationOverrides": { + "type": "operation", + "input": { + "target": "com.amazonaws.connect#ListHoursOfOperationOverridesRequest" + }, + "output": { + "target": "com.amazonaws.connect#ListHoursOfOperationOverridesResponse" + }, + "errors": [ + { + "target": "com.amazonaws.connect#InternalServiceException" + }, + { + "target": "com.amazonaws.connect#InvalidParameterException" + }, + { + "target": "com.amazonaws.connect#InvalidRequestException" + }, + { + "target": "com.amazonaws.connect#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.connect#ThrottlingException" + } + ], + "traits": { + "smithy.api#documentation": "

List the hours of operation overrides.

", + "smithy.api#http": { + "method": "GET", + "uri": "/hours-of-operations/{InstanceId}/{HoursOfOperationId}/overrides", + "code": 200 + }, + "smithy.api#paginated": { + "inputToken": "NextToken", + "outputToken": "NextToken", + "items": "HoursOfOperationOverrideList", + "pageSize": "MaxResults" + } + } + }, + "com.amazonaws.connect#ListHoursOfOperationOverridesRequest": { + "type": "structure", + "members": { + "InstanceId": { + "target": "com.amazonaws.connect#InstanceId", + "traits": { + "smithy.api#documentation": "

The identifier of the Amazon Connect instance.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "HoursOfOperationId": { + "target": "com.amazonaws.connect#HoursOfOperationId", + "traits": { + "smithy.api#documentation": "

The identifier for the hours of operation.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "NextToken": { + "target": "com.amazonaws.connect#NextToken", + "traits": { + "smithy.api#documentation": "

The token for the next set of results. Use the value returned in the previous response in\n the next request to retrieve the next set of results.

", + "smithy.api#httpQuery": "nextToken" + } + }, + "MaxResults": { + "target": "com.amazonaws.connect#MaxResult100", + "traits": { + "smithy.api#documentation": "

The maximum number of results to return per page. The default MaxResult size is 100. Valid\n Range: Minimum value of 1. Maximum value of 1000.

", + "smithy.api#httpQuery": "maxResults" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.connect#ListHoursOfOperationOverridesResponse": { + "type": "structure", + "members": { + "NextToken": { + "target": "com.amazonaws.connect#NextToken", + "traits": { + "smithy.api#documentation": "

The token for the next set of results. Use the value returned in the previous response in\n the next request to retrieve the next set of results.

" + } + }, + "HoursOfOperationOverrideList": { + "target": "com.amazonaws.connect#HoursOfOperationOverrideList", + "traits": { + "smithy.api#documentation": "

Information about the hours of operation override.

" + } + }, + "LastModifiedRegion": { + "target": "com.amazonaws.connect#RegionName", + "traits": { + "smithy.api#documentation": "

The Amazon Web Services Region where this resource was last modified.

" + } + }, + "LastModifiedTime": { + "target": "com.amazonaws.connect#Timestamp", + "traits": { + "smithy.api#documentation": "

The timestamp when this resource was last modified.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.connect#ListHoursOfOperations": { "type": "operation", "input": { @@ -23220,7 +24218,7 @@ "SourcePhoneNumberArn": { "target": "com.amazonaws.connect#ARN", "traits": { - "smithy.api#documentation": "

The claimed phone number ARN that was previously imported from the external service, such as\n Amazon Web Services End User Messaging. If it is from Amazon Web Services End User Messaging, it looks like the ARN of the phone number\n that was imported from Amazon Web Services End User Messaging.

" + "smithy.api#documentation": "

The claimed phone number ARN that was previously imported from the external service, such as\n Amazon Web Services End User Messaging. If it is from Amazon Web Services End User Messaging, it\n looks like the ARN of the phone number that was imported from Amazon Web Services End User\n Messaging.

" } } }, @@ -25914,7 +26912,7 @@ "MetricFilterValues": { "target": "com.amazonaws.connect#MetricFilterValueList", "traits": { - "smithy.api#documentation": "

The values to use for filtering data. Values for metric-level filters can be either a fixed\n set of values or a customized list, depending on the use case.

\n

For valid values of metric-level filters INITIATION_METHOD,\n DISCONNECT_REASON, and ANSWERING_MACHINE_DETECTION_STATUS, see ContactTraceRecord in the Amazon Connect Administrator Guide.

\n

For valid values of the metric-level filter FLOWS_OUTCOME_TYPE, see the\n description for the Flow outcome metric in the Amazon Connect Administrator\n Guide.

\n

For valid values of the metric-level filter BOT_CONVERSATION_OUTCOME_TYPE, see the\n description for the \n Bot conversations completed\n \n in the Amazon Connect Administrator\n Guide.

\n

For valid values of the metric-level filter BOT_INTENT_OUTCOME_TYPE, see the description for\n the \n Bot intents completed\n \n metric in the Amazon Connect Administrator\n Guide.

" + "smithy.api#documentation": "

The values to use for filtering data. Values for metric-level filters can be either a fixed\n set of values or a customized list, depending on the use case.

\n

For valid values of metric-level filters INITIATION_METHOD,\n DISCONNECT_REASON, and ANSWERING_MACHINE_DETECTION_STATUS, see ContactTraceRecord in the Amazon Connect Administrator Guide.

\n

For valid values of the metric-level filter FLOWS_OUTCOME_TYPE, see the\n description for the Flow outcome metric in the Amazon Connect Administrator\n Guide.

\n

For valid values of the metric-level filter BOT_CONVERSATION_OUTCOME_TYPE, see\n the description for the Bot\n conversations completed in the Amazon Connect Administrator\n Guide.

\n

For valid values of the metric-level filter BOT_INTENT_OUTCOME_TYPE, see the\n description for the Bot intents\n completed metric in the Amazon Connect Administrator\n Guide.

" } }, "Negate": { @@ -26475,6 +27473,32 @@ } } }, + "com.amazonaws.connect#OperationalHour": { + "type": "structure", + "members": { + "Start": { + "target": "com.amazonaws.connect#OverrideTimeSlice", + "traits": { + "smithy.api#documentation": "

The start time that your contact center opens.

" + } + }, + "End": { + "target": "com.amazonaws.connect#OverrideTimeSlice", + "traits": { + "smithy.api#documentation": "

The end time that your contact center closes.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Information about the hours of operations with the effective override applied.

" + } + }, + "com.amazonaws.connect#OperationalHours": { + "type": "list", + "member": { + "target": "com.amazonaws.connect#OperationalHour" + } + }, "com.amazonaws.connect#Origin": { "type": "string", "traits": { @@ -26681,6 +27705,77 @@ "smithy.api#httpError": 404 } }, + "com.amazonaws.connect#OverrideDays": { + "type": "enum", + "members": { + "SUNDAY": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "SUNDAY" + } + }, + "MONDAY": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "MONDAY" + } + }, + "TUESDAY": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "TUESDAY" + } + }, + "WEDNESDAY": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "WEDNESDAY" + } + }, + "THURSDAY": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "THURSDAY" + } + }, + "FRIDAY": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "FRIDAY" + } + }, + "SATURDAY": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "SATURDAY" + } + } + } + }, + "com.amazonaws.connect#OverrideTimeSlice": { + "type": "structure", + "members": { + "Hours": { + "target": "com.amazonaws.connect#Hours24Format", + "traits": { + "smithy.api#default": null, + "smithy.api#documentation": "

The hours.

", + "smithy.api#required": {} + } + }, + "Minutes": { + "target": "com.amazonaws.connect#MinutesLimit60", + "traits": { + "smithy.api#default": null, + "smithy.api#documentation": "

The minutes.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

The start time or end time for an hours of operation override.

" + } + }, "com.amazonaws.connect#PEM": { "type": "string", "traits": { @@ -31014,6 +32109,15 @@ "smithy.api#pattern": "^[a-z]{2}(-[a-z]+){1,2}(-[0-9])?$" } }, + "com.amazonaws.connect#RegistrationId": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 256 + } + } + }, "com.amazonaws.connect#RehydrationType": { "type": "enum", "members": { @@ -33212,6 +34316,108 @@ "smithy.api#output": {} } }, + "com.amazonaws.connect#SearchHoursOfOperationOverrides": { + "type": "operation", + "input": { + "target": "com.amazonaws.connect#SearchHoursOfOperationOverridesRequest" + }, + "output": { + "target": "com.amazonaws.connect#SearchHoursOfOperationOverridesResponse" + }, + "errors": [ + { + "target": "com.amazonaws.connect#InternalServiceException" + }, + { + "target": "com.amazonaws.connect#InvalidParameterException" + }, + { + "target": "com.amazonaws.connect#InvalidRequestException" + }, + { + "target": "com.amazonaws.connect#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.connect#ThrottlingException" + } + ], + "traits": { + "smithy.api#documentation": "

Searches the hours of operation overrides.

", + "smithy.api#http": { + "method": "POST", + "uri": "/search-hours-of-operation-overrides", + "code": 200 + }, + "smithy.api#paginated": { + "inputToken": "NextToken", + "outputToken": "NextToken", + "items": "HoursOfOperationOverrides", + "pageSize": "MaxResults" + } + } + }, + "com.amazonaws.connect#SearchHoursOfOperationOverridesRequest": { + "type": "structure", + "members": { + "InstanceId": { + "target": "com.amazonaws.connect#InstanceId", + "traits": { + "smithy.api#documentation": "

The identifier of the Amazon Connect instance.

", + "smithy.api#required": {} + } + }, + "NextToken": { + "target": "com.amazonaws.connect#NextToken2500", + "traits": { + "smithy.api#documentation": "

The token for the next set of results. Use the value returned in the previous response in\n the next request to retrieve the next set of results. Length Constraints: Minimum length of 1.\n Maximum length of 2500.

" + } + }, + "MaxResults": { + "target": "com.amazonaws.connect#MaxResult100", + "traits": { + "smithy.api#documentation": "

The maximum number of results to return per page. Valid Range: Minimum value of 1. Maximum\n value of 100.

" + } + }, + "SearchFilter": { + "target": "com.amazonaws.connect#HoursOfOperationSearchFilter" + }, + "SearchCriteria": { + "target": "com.amazonaws.connect#HoursOfOperationOverrideSearchCriteria", + "traits": { + "smithy.api#documentation": "

The search criteria to be used to return hours of operations overrides.

" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.connect#SearchHoursOfOperationOverridesResponse": { + "type": "structure", + "members": { + "HoursOfOperationOverrides": { + "target": "com.amazonaws.connect#HoursOfOperationOverrideList", + "traits": { + "smithy.api#documentation": "

Information about the hours of operations overrides.

" + } + }, + "NextToken": { + "target": "com.amazonaws.connect#NextToken2500", + "traits": { + "smithy.api#documentation": "

The token for the next set of results. Use the value returned in the previous response in\n the next request to retrieve the next set of results. Length Constraints: Minimum length of 1.\n Maximum length of 2500.

" + } + }, + "ApproximateTotalCount": { + "target": "com.amazonaws.connect#ApproximateTotalCount", + "traits": { + "smithy.api#documentation": "

The total number of hours of operations which matched your search query.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.connect#SearchHoursOfOperations": { "type": "operation", "input": { @@ -34958,7 +36164,7 @@ } ], "traits": { - "smithy.api#documentation": "

Processes chat integration events from Amazon Web Services or external integrations to\n Amazon Connect. A chat integration event includes:

\n
    \n
  • \n

    SourceId, DestinationId, and Subtype: a set of identifiers, uniquely representing a\n chat

    \n
  • \n
  • \n

    ChatEvent: details of the chat action to perform such as sending a message, event, or\n disconnecting from a chat

    \n
  • \n
\n

When a chat integration event is sent with chat identifiers that do not map to an active\n chat contact, a new chat contact is also created before handling chat action.

\n

Access to this API is currently restricted to Amazon Web Services End User Messaging for supporting SMS\n integration.

", + "smithy.api#documentation": "

Processes chat integration events from Amazon Web Services or external integrations to\n Amazon Connect. A chat integration event includes:

\n
    \n
  • \n

    SourceId, DestinationId, and Subtype: a set of identifiers, uniquely representing a\n chat

    \n
  • \n
  • \n

    ChatEvent: details of the chat action to perform such as sending a message, event, or\n disconnecting from a chat

    \n
  • \n
\n

When a chat integration event is sent with chat identifiers that do not map to an active\n chat contact, a new chat contact is also created before handling chat action.

\n

Access to this API is currently restricted to Amazon Web Services End User Messaging for\n supporting SMS integration.

", "smithy.api#http": { "method": "POST", "uri": "/chat-integration-event", @@ -34979,7 +36185,7 @@ "DestinationId": { "target": "com.amazonaws.connect#DestinationId", "traits": { - "smithy.api#documentation": "

Chat system identifier, used in part to uniquely identify chat. This is associated with the\n Amazon Connect instance and flow to be used to start chats. For Server Migration Service, this is the phone\n number destination of inbound Server Migration Service messages represented by an Amazon Web Services End User Messaging phone number\n ARN.

", + "smithy.api#documentation": "

Chat system identifier, used in part to uniquely identify chat. This is associated with the\n Amazon Connect instance and flow to be used to start chats. For Server Migration Service, this is\n the phone number destination of inbound Server Migration Service messages represented by an Amazon Web Services End User Messaging phone number ARN.

", "smithy.api#required": {} } }, @@ -36195,7 +37401,7 @@ } ], "traits": { - "smithy.api#documentation": "

Initiates a new outbound SMS contact to a customer. Response of this API provides the\n ContactId of the outbound SMS contact created.

\n

\n SourceEndpoint only supports Endpoints with\n CONNECT_PHONENUMBER_ARN as Type and DestinationEndpoint only supports Endpoints with TELEPHONE_NUMBER as\n Type. ContactFlowId initiates the flow to manage the new SMS\n contact created.

\n

This API can be used to initiate outbound SMS contacts for an agent, or it can also deflect\n an ongoing contact to an outbound SMS contact by using the StartOutboundChatContact Flow Action.

\n

For more information about using SMS in Amazon Connect, see the following topics in the\n Amazon Connect Administrator Guide:

\n ", + "smithy.api#documentation": "

Initiates a new outbound SMS contact to a customer. Response of this API provides the\n ContactId of the outbound SMS contact created.

\n

\n SourceEndpoint only supports Endpoints with\n CONNECT_PHONENUMBER_ARN as Type and DestinationEndpoint only supports Endpoints with TELEPHONE_NUMBER as\n Type. ContactFlowId initiates the flow to manage the new SMS\n contact created.

\n

This API can be used to initiate outbound SMS contacts for an agent, or it can also deflect\n an ongoing contact to an outbound SMS contact by using the StartOutboundChatContact Flow Action.

\n

For more information about using SMS in Amazon Connect, see the following topics in the\n Amazon Connect Administrator Guide:

\n ", "smithy.api#http": { "method": "PUT", "uri": "/contact/outbound-chat", @@ -39853,13 +41059,13 @@ "CustomerEndpoint": { "target": "com.amazonaws.connect#Endpoint", "traits": { - "smithy.api#documentation": "

The endpoint of the customer for which the contact was initiated. For external audio\n contacts, this is usually the end customer's phone number. This value can only be updated for\n external audio contacts. For more information, see Amazon Connect\n Contact Lens integration in the Amazon Connect Administrator Guide.

" + "smithy.api#documentation": "

The endpoint of the customer for which the contact was initiated. For external audio\n contacts, this is usually the end customer's phone number. This value can only be updated for\n external audio contacts. For more information, see Amazon Connect Contact Lens\n integration in the Amazon Connect Administrator Guide.

" } }, "SystemEndpoint": { "target": "com.amazonaws.connect#Endpoint", "traits": { - "smithy.api#documentation": "

External system endpoint for the contact was initiated. For external audio contacts, this is\n the phone number of the external system such as the contact center. This value can only be\n updated for external audio contacts. For more information, see Amazon Connect\n Contact Lens integration in the Amazon Connect Administrator Guide.

" + "smithy.api#documentation": "

External system endpoint for the contact was initiated. For external audio contacts, this is\n the phone number of the external system such as the contact center. This value can only be\n updated for external audio contacts. For more information, see Amazon Connect Contact Lens\n integration in the Amazon Connect Administrator Guide.

" } } }, @@ -40321,6 +41527,108 @@ } } }, + "com.amazonaws.connect#UpdateHoursOfOperationOverride": { + "type": "operation", + "input": { + "target": "com.amazonaws.connect#UpdateHoursOfOperationOverrideRequest" + }, + "output": { + "target": "smithy.api#Unit" + }, + "errors": [ + { + "target": "com.amazonaws.connect#ConditionalOperationFailedException" + }, + { + "target": "com.amazonaws.connect#DuplicateResourceException" + }, + { + "target": "com.amazonaws.connect#InternalServiceException" + }, + { + "target": "com.amazonaws.connect#InvalidParameterException" + }, + { + "target": "com.amazonaws.connect#InvalidRequestException" + }, + { + "target": "com.amazonaws.connect#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.connect#ThrottlingException" + } + ], + "traits": { + "smithy.api#documentation": "

Update the hours of operation override.

", + "smithy.api#http": { + "method": "POST", + "uri": "/hours-of-operations/{InstanceId}/{HoursOfOperationId}/overrides/{HoursOfOperationOverrideId}", + "code": 200 + } + } + }, + "com.amazonaws.connect#UpdateHoursOfOperationOverrideRequest": { + "type": "structure", + "members": { + "InstanceId": { + "target": "com.amazonaws.connect#InstanceId", + "traits": { + "smithy.api#documentation": "

The identifier of the Amazon Connect instance.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "HoursOfOperationId": { + "target": "com.amazonaws.connect#HoursOfOperationId", + "traits": { + "smithy.api#documentation": "

The identifier for the hours of operation.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "HoursOfOperationOverrideId": { + "target": "com.amazonaws.connect#HoursOfOperationOverrideId", + "traits": { + "smithy.api#documentation": "

The identifier for the hours of operation override.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "Name": { + "target": "com.amazonaws.connect#CommonHumanReadableName", + "traits": { + "smithy.api#documentation": "

The name of the hours of operation override.

" + } + }, + "Description": { + "target": "com.amazonaws.connect#CommonHumanReadableDescription", + "traits": { + "smithy.api#documentation": "

The description of the hours of operation override.

" + } + }, + "Config": { + "target": "com.amazonaws.connect#HoursOfOperationOverrideConfigList", + "traits": { + "smithy.api#documentation": "

Configuration information for the hours of operation override: day, start time, and end\n time.

" + } + }, + "EffectiveFrom": { + "target": "com.amazonaws.connect#HoursOfOperationOverrideYearMonthDayDateFormat", + "traits": { + "smithy.api#documentation": "

The date from which the hours of operation override is effective.

" + } + }, + "EffectiveTill": { + "target": "com.amazonaws.connect#HoursOfOperationOverrideYearMonthDayDateFormat", + "traits": { + "smithy.api#documentation": "

The date until which the hours of operation override is effective.

" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, "com.amazonaws.connect#UpdateHoursOfOperationRequest": { "type": "structure", "members": { @@ -43359,13 +44667,13 @@ "FirstName": { "target": "com.amazonaws.connect#AgentFirstName", "traits": { - "smithy.api#documentation": "

The first name. This is required if you are using Amazon Connect or SAML for identity\n management.

" + "smithy.api#documentation": "

The first name. This is required if you are using Amazon Connect or SAML for identity\n management. Inputs must be in Unicode Normalization Form C (NFC). Text containing characters in a\n non-NFC form (for example, decomposed characters or combining marks) are not accepted.

" } }, "LastName": { "target": "com.amazonaws.connect#AgentLastName", "traits": { - "smithy.api#documentation": "

The last name. This is required if you are using Amazon Connect or SAML for identity\n management.

" + "smithy.api#documentation": "

The last name. This is required if you are using Amazon Connect or SAML for identity\n management. Inputs must be in Unicode Normalization Form C (NFC). Text containing characters in a\n non-NFC form (for example, decomposed characters or combining marks) are not accepted.

" } }, "Email": { diff --git a/models/controlcatalog.json b/models/controlcatalog.json index 931339825d..d359fb2431 100644 --- a/models/controlcatalog.json +++ b/models/controlcatalog.json @@ -1237,7 +1237,7 @@ } }, "traits": { - "smithy.api#documentation": "

An object that describes the implementation type for a control.

\n

Our ImplementationDetails\n Type format has three required segments:

\n
    \n
  • \n

    \n SERVICE-PROVIDER::SERVICE-NAME::RESOURCE-NAME\n

    \n
  • \n
\n

For example, AWS::Config::ConfigRule\n or\n AWS::SecurityHub::SecurityControl resources have the format with three required segments.

\n

Our ImplementationDetails\n Type format has an optional fourth segment, which is present for applicable \n implementation types. The format is as follows:

\n
    \n
  • \n

    \n SERVICE-PROVIDER::SERVICE-NAME::RESOURCE-NAME::RESOURCE-TYPE-DESCRIPTION\n

    \n
  • \n
\n

For example, AWS::Organizations::Policy::SERVICE_CONTROL_POLICY\n or\n AWS::CloudFormation::Type::HOOK have the format with four segments.

\n

Although the format is similar, the values for the Type field do not match any Amazon Web Services CloudFormation values, and we do not use CloudFormation to implement these controls.

" + "smithy.api#documentation": "

An object that describes the implementation type for a control.

\n

Our ImplementationDetails\n Type format has three required segments:

\n
    \n
  • \n

    \n SERVICE-PROVIDER::SERVICE-NAME::RESOURCE-NAME\n

    \n
  • \n
\n

For example, AWS::Config::ConfigRule\n or\n AWS::SecurityHub::SecurityControl resources have the format with three required segments.

\n

Our ImplementationDetails\n Type format has an optional fourth segment, which is present for applicable \n implementation types. The format is as follows:

\n
    \n
  • \n

    \n SERVICE-PROVIDER::SERVICE-NAME::RESOURCE-NAME::RESOURCE-TYPE-DESCRIPTION\n

    \n
  • \n
\n

For example, AWS::Organizations::Policy::SERVICE_CONTROL_POLICY\n or\n AWS::CloudFormation::Type::HOOK have the format with four segments.

\n

Although the format is similar, the values for the Type field do not match any Amazon Web Services CloudFormation values.

" } }, "com.amazonaws.controlcatalog#ImplementationType": { diff --git a/models/database-migration-service.json b/models/database-migration-service.json index 05e72af77b..dd96ca6009 100644 --- a/models/database-migration-service.json +++ b/models/database-migration-service.json @@ -3697,7 +3697,7 @@ } }, "ReplicationInstanceClass": { - "target": "com.amazonaws.databasemigrationservice#String", + "target": "com.amazonaws.databasemigrationservice#ReplicationInstanceClass", "traits": { "smithy.api#documentation": "

The compute and memory capacity of the replication instance as defined for the specified\n replication instance class. For example to specify the instance class dms.c4.large, set this parameter to \"dms.c4.large\".

\n

For more information on the settings and capacities for the available replication instance classes, see \n \n Choosing the right DMS replication instance; and, \n Selecting the best size for a replication instance.\n

", "smithy.api#required": {} @@ -3780,6 +3780,12 @@ "traits": { "smithy.api#documentation": "

The type of IP address protocol used by a replication instance, \n such as IPv4 only or Dual-stack that supports both IPv4 and IPv6 addressing. \n IPv6 only is not yet supported.

" } + }, + "KerberosAuthenticationSettings": { + "target": "com.amazonaws.databasemigrationservice#KerberosAuthenticationSettings", + "traits": { + "smithy.api#documentation": "

Specifies the ID of the secret that stores the key cache file required for Kerberos authentication when creating a replication instance.

" + } } }, "traits": { @@ -5071,6 +5077,9 @@ "target": "com.amazonaws.databasemigrationservice#DeleteEventSubscriptionResponse" }, "errors": [ + { + "target": "com.amazonaws.databasemigrationservice#AccessDeniedFault" + }, { "target": "com.amazonaws.databasemigrationservice#InvalidResourceStateFault" }, @@ -5533,6 +5542,9 @@ "target": "com.amazonaws.databasemigrationservice#DeleteReplicationSubnetGroupResponse" }, "errors": [ + { + "target": "com.amazonaws.databasemigrationservice#AccessDeniedFault" + }, { "target": "com.amazonaws.databasemigrationservice#InvalidResourceStateFault" }, @@ -6315,7 +6327,7 @@ "Filters": { "target": "com.amazonaws.databasemigrationservice#FilterList", "traits": { - "smithy.api#documentation": "

Filters applied to the data providers described in the form of key-value pairs.

\n

Valid filter names: data-provider-identifier

" + "smithy.api#documentation": "

Filters applied to the data providers described in the form of key-value pairs.

\n

Valid filter names and values: data-provider-identifier, data provider arn or name

" } }, "MaxRecords": { @@ -7428,7 +7440,7 @@ "Filters": { "target": "com.amazonaws.databasemigrationservice#FilterList", "traits": { - "smithy.api#documentation": "

Filters applied to the instance profiles described in the form of key-value pairs.

" + "smithy.api#documentation": "

Filters applied to the instance profiles described in the form of key-value pairs.

\n

Valid filter names and values: instance-profile-identifier, instance profile arn or name

" } }, "MaxRecords": { @@ -8072,7 +8084,7 @@ "Filters": { "target": "com.amazonaws.databasemigrationservice#FilterList", "traits": { - "smithy.api#documentation": "

Filters applied to the migration projects described in the form of key-value pairs.

" + "smithy.api#documentation": "

Filters applied to the migration projects described in the form of key-value pairs.

\n

Valid filter names and values:

\n
    \n
  • \n

    instance-profile-identifier, instance profile arn or name

    \n
  • \n
  • \n

    data-provider-identifier, data provider arn or name

    \n
  • \n
  • \n

    migration-project-identifier, migration project arn or name

    \n
  • \n
" } }, "MaxRecords": { @@ -9791,6 +9803,9 @@ "target": "com.amazonaws.databasemigrationservice#DescribeTableStatisticsResponse" }, "errors": [ + { + "target": "com.amazonaws.databasemigrationservice#AccessDeniedFault" + }, { "target": "com.amazonaws.databasemigrationservice#InvalidResourceStateFault" }, @@ -11717,6 +11732,12 @@ "traits": { "smithy.api#documentation": "

Sets hostname verification\n for the certificate. This setting is supported in DMS version 3.5.1 and later.

" } + }, + "UseLargeIntegerValue": { + "target": "com.amazonaws.databasemigrationservice#BooleanOptional", + "traits": { + "smithy.api#documentation": "

Specifies using the large integer value with Kafka.

" + } } }, "traits": { @@ -11740,6 +11761,32 @@ } } }, + "com.amazonaws.databasemigrationservice#KerberosAuthenticationSettings": { + "type": "structure", + "members": { + "KeyCacheSecretId": { + "target": "com.amazonaws.databasemigrationservice#String", + "traits": { + "smithy.api#documentation": "

Specifies the secret ID of the key cache for the replication instance.

" + } + }, + "KeyCacheSecretIamArn": { + "target": "com.amazonaws.databasemigrationservice#String", + "traits": { + "smithy.api#documentation": "

Specifies the Amazon Resource Name (ARN) of the IAM role that grants Amazon Web Services DMS access to the secret containing key cache file for the replication instance.

" + } + }, + "Krb5FileContents": { + "target": "com.amazonaws.databasemigrationservice#String", + "traits": { + "smithy.api#documentation": "

Specifies the ID of the secret that stores the key cache file required for Kerberos authentication of the replication instance.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Specifies Kerberos authentication settings for use with DMS.

" + } + }, "com.amazonaws.databasemigrationservice#KeyList": { "type": "list", "member": { @@ -11808,6 +11855,12 @@ "traits": { "smithy.api#documentation": "

Set this optional parameter to true to avoid adding a '0x' prefix\n to raw data in hexadecimal format. For example, by default, DMS adds a '0x'\n prefix to the LOB column type in hexadecimal format moving from an Oracle source to an\n Amazon Kinesis target. Use the NoHexPrefix endpoint setting to enable\n migration of RAW data type columns without adding the '0x' prefix.

" } + }, + "UseLargeIntegerValue": { + "target": "com.amazonaws.databasemigrationservice#BooleanOptional", + "traits": { + "smithy.api#documentation": "

Specifies using the large integer value with Kinesis.

" + } } }, "traits": { @@ -12126,6 +12179,12 @@ "traits": { "smithy.api#documentation": "

Forces LOB lookup on inline LOB.

" } + }, + "AuthenticationMethod": { + "target": "com.amazonaws.databasemigrationservice#SqlServerAuthenticationMethod", + "traits": { + "smithy.api#documentation": "

Specifies using Kerberos authentication with Microsoft SQL Server.

" + } } }, "traits": { @@ -12841,6 +12900,9 @@ "target": "com.amazonaws.databasemigrationservice#ModifyEventSubscriptionResponse" }, "errors": [ + { + "target": "com.amazonaws.databasemigrationservice#AccessDeniedFault" + }, { "target": "com.amazonaws.databasemigrationservice#KMSAccessDeniedFault" }, @@ -13472,7 +13534,7 @@ } }, "ReplicationInstanceClass": { - "target": "com.amazonaws.databasemigrationservice#String", + "target": "com.amazonaws.databasemigrationservice#ReplicationInstanceClass", "traits": { "smithy.api#documentation": "

The compute and memory capacity of the replication instance as defined for the specified\n replication instance class. For example to specify the instance class dms.c4.large, set this parameter to \"dms.c4.large\".

\n

For more information on the settings and capacities for the available replication instance classes, see \n \n Selecting the right DMS replication instance for your migration.\n

" } @@ -13525,6 +13587,12 @@ "traits": { "smithy.api#documentation": "

The type of IP address protocol used by a replication instance, \n such as IPv4 only or Dual-stack that supports both IPv4 and IPv6 addressing. \n IPv6 only is not yet supported.

" } + }, + "KerberosAuthenticationSettings": { + "target": "com.amazonaws.databasemigrationservice#KerberosAuthenticationSettings", + "traits": { + "smithy.api#documentation": "

Specifies the ID of the secret that stores the key cache file required for Kerberos authentication when modifying a replication instance.

" + } } }, "traits": { @@ -14168,6 +14236,23 @@ } } }, + "com.amazonaws.databasemigrationservice#OracleAuthenticationMethod": { + "type": "enum", + "members": { + "Password": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "password" + } + }, + "Kerberos": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "kerberos" + } + } + } + }, "com.amazonaws.databasemigrationservice#OracleDataProviderSettings": { "type": "structure", "members": { @@ -14326,7 +14411,7 @@ "ArchivedLogsOnly": { "target": "com.amazonaws.databasemigrationservice#BooleanOptional", "traits": { - "smithy.api#documentation": "

When this field is set to Y, DMS only accesses the\n archived redo logs. If the archived redo logs are stored on\n Automatic Storage Management (ASM) only, the DMS user account needs to be\n granted ASM privileges.

" + "smithy.api#documentation": "

When this field is set to True, DMS only accesses the\n archived redo logs. If the archived redo logs are stored on\n Automatic Storage Management (ASM) only, the DMS user account needs to be\n granted ASM privileges.

" } }, "AsmPassword": { @@ -14440,19 +14525,19 @@ "UseBFile": { "target": "com.amazonaws.databasemigrationservice#BooleanOptional", "traits": { - "smithy.api#documentation": "

Set this attribute to Y to capture change data using the Binary Reader utility. Set\n UseLogminerReader to N to set this attribute to Y. To use Binary Reader\n with Amazon RDS for Oracle as the source, you set additional attributes. For more information\n about using this setting with Oracle Automatic Storage Management (ASM), see Using Oracle LogMiner or DMS Binary Reader for\n CDC.

" + "smithy.api#documentation": "

Set this attribute to True to capture change data using the Binary Reader utility. Set\n UseLogminerReader to False to set this attribute to True. To use Binary Reader\n with Amazon RDS for Oracle as the source, you set additional attributes. For more information\n about using this setting with Oracle Automatic Storage Management (ASM), see Using Oracle LogMiner or DMS Binary Reader for\n CDC.

" } }, "UseDirectPathFullLoad": { "target": "com.amazonaws.databasemigrationservice#BooleanOptional", "traits": { - "smithy.api#documentation": "

Set this attribute to Y to have DMS use a direct path full load. \n Specify this value to use the direct path protocol in the Oracle Call Interface (OCI). \n By using this OCI protocol, you can bulk-load Oracle target tables during a full load.

" + "smithy.api#documentation": "

Set this attribute to True to have DMS use a direct path full load. \n Specify this value to use the direct path protocol in the Oracle Call Interface (OCI). \n By using this OCI protocol, you can bulk-load Oracle target tables during a full load.

" } }, "UseLogminerReader": { "target": "com.amazonaws.databasemigrationservice#BooleanOptional", "traits": { - "smithy.api#documentation": "

Set this attribute to Y to capture change data using the Oracle LogMiner utility (the\n default). Set this attribute to N if you want to access the redo logs as a binary file.\n When you set UseLogminerReader to N, also set UseBfile to Y. For\n more information on this setting and using Oracle ASM, see Using Oracle LogMiner or DMS Binary Reader for CDC in\n the DMS User Guide.

" + "smithy.api#documentation": "

Set this attribute to True to capture change data using the Oracle LogMiner utility (the\n default). Set this attribute to False if you want to access the redo logs as a binary file.\n When you set UseLogminerReader to False, also set UseBfile to True. For\n more information on this setting and using Oracle ASM, see Using Oracle LogMiner or DMS Binary Reader for CDC in\n the DMS User Guide.

" } }, "SecretsManagerAccessRoleArn": { @@ -14494,7 +14579,13 @@ "OpenTransactionWindow": { "target": "com.amazonaws.databasemigrationservice#IntegerOptional", "traits": { - "smithy.api#documentation": "

The timeframe in minutes to check for open transactions for a CDC-only task.

\n

You can\n specify an integer value between 0 (the default) and 240 (the maximum).

\n \n

This parameter is only valid in DMS version 3.5.0 and later. DMS supports\n a window of up to 9.5 hours including the value for OpenTransactionWindow.

\n
" + "smithy.api#documentation": "

The timeframe in minutes to check for open transactions for a CDC-only task.

\n

You can\n specify an integer value between 0 (the default) and 240 (the maximum).

\n \n

This parameter is only valid in DMS version 3.5.0 and later.

\n
" + } + }, + "AuthenticationMethod": { + "target": "com.amazonaws.databasemigrationservice#OracleAuthenticationMethod", + "traits": { + "smithy.api#documentation": "

Specifies using Kerberos authentication with Oracle.

" } } }, @@ -14512,7 +14603,7 @@ } }, "ReplicationInstanceClass": { - "target": "com.amazonaws.databasemigrationservice#String", + "target": "com.amazonaws.databasemigrationservice#ReplicationInstanceClass", "traits": { "smithy.api#documentation": "

The compute and memory capacity of the replication instance as defined for the specified\n replication instance class. For example to specify the instance class dms.c4.large, set this parameter to \"dms.c4.large\".

\n

For more information on the settings and capacities for the available replication instance classes, see \n \n Selecting the right DMS replication instance for your migration.\n

" } @@ -14708,13 +14799,13 @@ "CaptureDdls": { "target": "com.amazonaws.databasemigrationservice#BooleanOptional", "traits": { - "smithy.api#documentation": "

To capture DDL events, DMS creates various artifacts in\n the PostgreSQL database when the task starts. You can later\n remove these artifacts.

\n

If this value is set to N, you don't have to create tables or\n triggers on the source database.

" + "smithy.api#documentation": "

To capture DDL events, DMS creates various artifacts in\n the PostgreSQL database when the task starts. You can later\n remove these artifacts.

\n

The default value is true.

\n

If this value is set to N, you don't have to create tables or\n triggers on the source database.

" } }, "MaxFileSize": { "target": "com.amazonaws.databasemigrationservice#IntegerOptional", "traits": { - "smithy.api#documentation": "

Specifies the maximum size (in KB) of any .csv file used to\n transfer data to PostgreSQL.

\n

Example: maxFileSize=512\n

" + "smithy.api#documentation": "

Specifies the maximum size (in KB) of any .csv file used to\n transfer data to PostgreSQL.

\n

The default value is 32,768 KB (32 MB).

\n

Example: maxFileSize=512\n

" } }, "DatabaseName": { @@ -14726,7 +14817,7 @@ "DdlArtifactsSchema": { "target": "com.amazonaws.databasemigrationservice#String", "traits": { - "smithy.api#documentation": "

The schema in which the operational DDL database artifacts\n are created.

\n

Example: ddlArtifactsSchema=xyzddlschema;\n

" + "smithy.api#documentation": "

The schema in which the operational DDL database artifacts\n are created.

\n

The default value is public.

\n

Example: ddlArtifactsSchema=xyzddlschema;\n

" } }, "ExecuteTimeout": { @@ -14738,25 +14829,25 @@ "FailTasksOnLobTruncation": { "target": "com.amazonaws.databasemigrationservice#BooleanOptional", "traits": { - "smithy.api#documentation": "

When set to true, this value causes a task to fail if the\n actual size of a LOB column is greater than the specified\n LobMaxSize.

\n

If task is set to Limited LOB mode and this option is set to\n true, the task fails instead of truncating the LOB data.

" + "smithy.api#documentation": "

When set to true, this value causes a task to fail if the\n actual size of a LOB column is greater than the specified\n LobMaxSize.

\n

The default value is false.

\n

If task is set to Limited LOB mode and this option is set to\n true, the task fails instead of truncating the LOB data.

" } }, "HeartbeatEnable": { "target": "com.amazonaws.databasemigrationservice#BooleanOptional", "traits": { - "smithy.api#documentation": "

The write-ahead log (WAL) heartbeat feature mimics a dummy transaction. By doing this,\n it prevents idle logical replication slots from holding onto old WAL logs, which can result in\n storage full situations on the source. This heartbeat keeps restart_lsn moving\n and prevents storage full scenarios.

" + "smithy.api#documentation": "

The write-ahead log (WAL) heartbeat feature mimics a dummy transaction. By doing this,\n it prevents idle logical replication slots from holding onto old WAL logs, which can result in\n storage full situations on the source. This heartbeat keeps restart_lsn moving\n and prevents storage full scenarios.

\n

The default value is false.

" } }, "HeartbeatSchema": { "target": "com.amazonaws.databasemigrationservice#String", "traits": { - "smithy.api#documentation": "

Sets the schema in which the heartbeat artifacts are created.

" + "smithy.api#documentation": "

Sets the schema in which the heartbeat artifacts are created.

\n

The default value is public.

" } }, "HeartbeatFrequency": { "target": "com.amazonaws.databasemigrationservice#IntegerOptional", "traits": { - "smithy.api#documentation": "

Sets the WAL heartbeat frequency (in minutes).

" + "smithy.api#documentation": "

Sets the WAL heartbeat frequency (in minutes).

\n

The default value is 5 minutes.

" } }, "Password": { @@ -14792,7 +14883,7 @@ "PluginName": { "target": "com.amazonaws.databasemigrationservice#PluginNameValue", "traits": { - "smithy.api#documentation": "

Specifies the plugin to use to create a replication slot.

" + "smithy.api#documentation": "

Specifies the plugin to use to create a replication slot.

\n

The default value is pglogical.

" } }, "SecretsManagerAccessRoleArn": { @@ -14816,19 +14907,19 @@ "MapBooleanAsBoolean": { "target": "com.amazonaws.databasemigrationservice#BooleanOptional", "traits": { - "smithy.api#documentation": "

When true, lets PostgreSQL migrate the boolean type as boolean. By default, PostgreSQL migrates booleans as \n varchar(5). You must set this setting on both the source and target endpoints for it to take effect.

" + "smithy.api#documentation": "

When true, lets PostgreSQL migrate the boolean type as boolean. By default, PostgreSQL migrates booleans as \n varchar(5). You must set this setting on both the source and target endpoints for it to take effect.

\n

The default value is false.

" } }, "MapJsonbAsClob": { "target": "com.amazonaws.databasemigrationservice#BooleanOptional", "traits": { - "smithy.api#documentation": "

When true, DMS migrates JSONB values as CLOB.

" + "smithy.api#documentation": "

When true, DMS migrates JSONB values as CLOB.

\n

The default value is false.

" } }, "MapLongVarcharAs": { "target": "com.amazonaws.databasemigrationservice#LongVarcharMappingType", "traits": { - "smithy.api#documentation": "

When true, DMS migrates LONG values as VARCHAR.

" + "smithy.api#documentation": "

Sets what datatype to map LONG values as.

\n

The default value is wstring.

" } }, "DatabaseMode": { @@ -14842,6 +14933,12 @@ "traits": { "smithy.api#documentation": "

The Babelfish for Aurora PostgreSQL database name for the endpoint.

" } + }, + "DisableUnicodeSourceFilter": { + "target": "com.amazonaws.databasemigrationservice#BooleanOptional", + "traits": { + "smithy.api#documentation": "

Disables the Unicode source filter with PostgreSQL, for values passed into the Selection rule filter on Source Endpoint column values. \n By default, DMS performs source filter comparisons using a Unicode string, which can cause lookups to ignore the indexes in the text columns and slow down migrations.

\n

Unicode support should only be disabled when the selection rule filter is on a text column in the source database that is indexed.

" + } } }, "traits": { @@ -15948,7 +16045,7 @@ "StartReplicationType": { "target": "com.amazonaws.databasemigrationservice#String", "traits": { - "smithy.api#documentation": "

The replication type.

" + "smithy.api#documentation": "

The type of replication to start.

" } }, "CdcStartTime": { @@ -16114,7 +16211,7 @@ } }, "ReplicationInstanceClass": { - "target": "com.amazonaws.databasemigrationservice#String", + "target": "com.amazonaws.databasemigrationservice#ReplicationInstanceClass", "traits": { "smithy.api#documentation": "

The compute and memory capacity of the replication instance as defined for the specified\n replication instance class. It is a required parameter, although a default value is\n pre-selected in the DMS console.

\n

For more information on the settings and capacities for the available replication instance classes, see \n \n Selecting the right DMS replication instance for your migration.\n

" } @@ -16262,12 +16359,27 @@ "traits": { "smithy.api#documentation": "

The type of IP address protocol used by a replication instance, \n such as IPv4 only or Dual-stack that supports both IPv4 and IPv6 addressing. \n IPv6 only is not yet supported.

" } + }, + "KerberosAuthenticationSettings": { + "target": "com.amazonaws.databasemigrationservice#KerberosAuthenticationSettings", + "traits": { + "smithy.api#documentation": "

Specifies the ID of the secret that stores the key cache file required for Kerberos authentication, when replicating an instance.

" + } } }, "traits": { "smithy.api#documentation": "

Provides information that defines a replication instance.

" } }, + "com.amazonaws.databasemigrationservice#ReplicationInstanceClass": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 0, + "max": 30 + } + } + }, "com.amazonaws.databasemigrationservice#ReplicationInstanceIpv6AddressList": { "type": "list", "member": { @@ -16341,7 +16453,7 @@ "type": "structure", "members": { "ReplicationInstanceClass": { - "target": "com.amazonaws.databasemigrationservice#String", + "target": "com.amazonaws.databasemigrationservice#ReplicationInstanceClass", "traits": { "smithy.api#documentation": "

The compute and memory capacity of the replication instance as defined for the specified\n replication instance class.

\n

For more information on the settings and capacities for the available replication instance classes, see \n \n Selecting the right DMS replication instance for your migration.\n

" } @@ -16589,7 +16701,7 @@ "StopReason": { "target": "com.amazonaws.databasemigrationservice#String", "traits": { - "smithy.api#documentation": "

The reason the replication task was stopped. This response parameter can return one of\n the following values:

\n
    \n
  • \n

    \n \"Stop Reason NORMAL\"\n

    \n
  • \n
  • \n

    \n \"Stop Reason RECOVERABLE_ERROR\"\n

    \n
  • \n
  • \n

    \n \"Stop Reason FATAL_ERROR\"\n

    \n
  • \n
  • \n

    \n \"Stop Reason FULL_LOAD_ONLY_FINISHED\"\n

    \n
  • \n
  • \n

    \n \"Stop Reason STOPPED_AFTER_FULL_LOAD\" – Full load completed, with cached changes not applied

    \n
  • \n
  • \n

    \n \"Stop Reason STOPPED_AFTER_CACHED_EVENTS\" – Full load completed, with cached changes applied

    \n
  • \n
  • \n

    \n \"Stop Reason EXPRESS_LICENSE_LIMITS_REACHED\"\n

    \n
  • \n
  • \n

    \n \"Stop Reason STOPPED_AFTER_DDL_APPLY\" – User-defined stop task after DDL applied

    \n
  • \n
  • \n

    \n \"Stop Reason STOPPED_DUE_TO_LOW_MEMORY\"\n

    \n
  • \n
  • \n

    \n \"Stop Reason STOPPED_DUE_TO_LOW_DISK\"\n

    \n
  • \n
  • \n

    \n \"Stop Reason STOPPED_AT_SERVER_TIME\" – User-defined server time for stopping task

    \n
  • \n
  • \n

    \n \"Stop Reason STOPPED_AT_COMMIT_TIME\" – User-defined commit time for stopping task

    \n
  • \n
  • \n

    \n \"Stop Reason RECONFIGURATION_RESTART\"\n

    \n
  • \n
  • \n

    \n \"Stop Reason RECYCLE_TASK\"\n

    \n
  • \n
" + "smithy.api#documentation": "

The reason the replication task was stopped. This response parameter can return one of\n the following values:

\n
    \n
  • \n

    \n \"Stop Reason NORMAL\" – The task completed successfully with no additional information returned.

    \n
  • \n
  • \n

    \n \"Stop Reason RECOVERABLE_ERROR\"\n

    \n
  • \n
  • \n

    \n \"Stop Reason FATAL_ERROR\"\n

    \n
  • \n
  • \n

    \n \"Stop Reason FULL_LOAD_ONLY_FINISHED\" – The task completed the full load phase.\n DMS applied cached changes if you set StopTaskCachedChangesApplied to true.

    \n
  • \n
  • \n

    \n \"Stop Reason STOPPED_AFTER_FULL_LOAD\" – Full load completed, with cached changes not applied

    \n
  • \n
  • \n

    \n \"Stop Reason STOPPED_AFTER_CACHED_EVENTS\" – Full load completed, with cached changes applied

    \n
  • \n
  • \n

    \n \"Stop Reason EXPRESS_LICENSE_LIMITS_REACHED\"\n

    \n
  • \n
  • \n

    \n \"Stop Reason STOPPED_AFTER_DDL_APPLY\" – User-defined stop task after DDL applied

    \n
  • \n
  • \n

    \n \"Stop Reason STOPPED_DUE_TO_LOW_MEMORY\"\n

    \n
  • \n
  • \n

    \n \"Stop Reason STOPPED_DUE_TO_LOW_DISK\"\n

    \n
  • \n
  • \n

    \n \"Stop Reason STOPPED_AT_SERVER_TIME\" – User-defined server time for stopping task

    \n
  • \n
  • \n

    \n \"Stop Reason STOPPED_AT_COMMIT_TIME\" – User-defined commit time for stopping task

    \n
  • \n
  • \n

    \n \"Stop Reason RECONFIGURATION_RESTART\"\n

    \n
  • \n
  • \n

    \n \"Stop Reason RECYCLE_TASK\"\n

    \n
  • \n
" } }, "ReplicationTaskCreationDate": { @@ -16691,7 +16803,7 @@ } }, "S3ObjectUrl": { - "target": "com.amazonaws.databasemigrationservice#String", + "target": "com.amazonaws.databasemigrationservice#SecretString", "traits": { "smithy.api#documentation": "

The URL of the S3 object containing the task assessment results.

\n

The response object only contains this field if you provide DescribeReplicationTaskAssessmentResultsMessage$ReplicationTaskArn\n in the request.

" } @@ -16728,7 +16840,7 @@ "Status": { "target": "com.amazonaws.databasemigrationservice#String", "traits": { - "smithy.api#documentation": "

Assessment run status.

\n

This status can have one of the following values:

\n
    \n
  • \n

    \n \"cancelling\" – The assessment run was canceled by the\n CancelReplicationTaskAssessmentRun operation.

    \n
  • \n
  • \n

    \n \"deleting\" – The assessment run was deleted by the\n DeleteReplicationTaskAssessmentRun operation.

    \n
  • \n
  • \n

    \n \"failed\" – At least one individual assessment completed with a\n failed status.

    \n
  • \n
  • \n

    \n \"error-provisioning\" – An internal error occurred while\n resources were provisioned (during provisioning status).

    \n
  • \n
  • \n

    \n \"error-executing\" – An internal error occurred while\n individual assessments ran (during running status).

    \n
  • \n
  • \n

    \n \"invalid state\" – The assessment run is in an unknown state.

    \n
  • \n
  • \n

    \n \"passed\" – All individual assessments have completed, and none\n has a failed status.

    \n
  • \n
  • \n

    \n \"provisioning\" – Resources required to run individual\n assessments are being provisioned.

    \n
  • \n
  • \n

    \n \"running\" – Individual assessments are being run.

    \n
  • \n
  • \n

    \n \"starting\" – The assessment run is starting, but resources are not yet\n being provisioned for individual assessments.

    \n
  • \n
" + "smithy.api#documentation": "

Assessment run status.

\n

This status can have one of the following values:

\n
    \n
  • \n

    \n \"cancelling\" – The assessment run was canceled by the\n CancelReplicationTaskAssessmentRun operation.

    \n
  • \n
  • \n

    \n \"deleting\" – The assessment run was deleted by the\n DeleteReplicationTaskAssessmentRun operation.

    \n
  • \n
  • \n

    \n \"failed\" – At least one individual assessment completed with a\n failed status.

    \n
  • \n
  • \n

    \n \"error-provisioning\" – An internal error occurred while\n resources were provisioned (during provisioning status).

    \n
  • \n
  • \n

    \n \"error-executing\" – An internal error occurred while\n individual assessments ran (during running status).

    \n
  • \n
  • \n

    \n \"invalid state\" – The assessment run is in an unknown state.

    \n
  • \n
  • \n

    \n \"passed\" – All individual assessments have completed, and none\n has a failed status.

    \n
  • \n
  • \n

    \n \"provisioning\" – Resources required to run individual\n assessments are being provisioned.

    \n
  • \n
  • \n

    \n \"running\" – Individual assessments are being run.

    \n
  • \n
  • \n

    \n \"starting\" – The assessment run is starting, but resources are not yet\n being provisioned for individual assessments.

    \n
  • \n
  • \n

    \n \"warning\" – At least one individual assessment completed with a warning status.

    \n
  • \n
" } }, "ReplicationTaskAssessmentRunCreationDate": { @@ -17703,6 +17815,23 @@ } } }, + "com.amazonaws.databasemigrationservice#SqlServerAuthenticationMethod": { + "type": "enum", + "members": { + "Password": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "password" + } + }, + "Kerberos": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "kerberos" + } + } + } + }, "com.amazonaws.databasemigrationservice#SslSecurityProtocolValue": { "type": "enum", "members": { @@ -18446,7 +18575,7 @@ "StartReplicationType": { "target": "com.amazonaws.databasemigrationservice#String", "traits": { - "smithy.api#documentation": "

The replication type.

", + "smithy.api#documentation": "

The replication type.

\n

When the replication type is full-load or full-load-and-cdc, the only valid value \n for the first run of the replication is start-replication. This option will start the replication.

\n

You can also use ReloadTables to reload specific tables that failed during replication instead \n of restarting the replication.

\n

The resume-processing option isn't applicable for a full-load replication,\n because you can't resume partially loaded tables during the full load phase.

\n

For a full-load-and-cdc replication, DMS migrates table data, and then applies data changes \n that occur on the source. To load all the tables again, and start capturing source changes, \n use reload-target. Otherwise use resume-processing, to replicate the \n changes from the last stop position.

", "smithy.api#required": {} } }, diff --git a/models/dlm.json b/models/dlm.json index 235ee067f3..e6a6e6e149 100644 --- a/models/dlm.json +++ b/models/dlm.json @@ -306,7 +306,7 @@ "Location": { "target": "com.amazonaws.dlm#LocationValues", "traits": { - "smithy.api#documentation": "

\n [Custom snapshot policies only] Specifies the destination for snapshots created by the policy. To create \n\t\t\tsnapshots in the same Region as the source resource, specify CLOUD. To create \n\t\t\tsnapshots on the same Outpost as the source resource, specify OUTPOST_LOCAL. \n\t\t\tIf you omit this parameter, CLOUD is used by default.

\n

If the policy targets resources in an Amazon Web Services Region, then you must create \n\t\t\tsnapshots in the same Region as the source resource. If the policy targets resources on an \n\t\t\tOutpost, then you can create snapshots on the same Outpost as the source resource, or in \n\t\t\tthe Region of that Outpost.

" + "smithy.api#documentation": "

\n [Custom snapshot policies only] Specifies the destination for snapshots created by the policy. The \n\t\t\tallowed destinations depend on the location of the targeted resources.

\n
    \n
  • \n

    If the policy targets resources in a Region, then you must create snapshots \n\t\t\t\t\tin the same Region as the source resource.

    \n
  • \n
  • \n

    If the policy targets resources in a Local Zone, you can create snapshots in \n\t\t\t\t\tthe same Local Zone or in its parent Region.

    \n
  • \n
  • \n

    If the policy targets resources on an Outpost, then you can create snapshots \n\t\t\t\t\ton the same Outpost or in its parent Region.

    \n
  • \n
\n

Specify one of the following values:

\n
    \n
  • \n

    To create snapshots in the same Region as the source resource, specify \n\t\t\t\t\tCLOUD.

    \n
  • \n
  • \n

    To create snapshots in the same Local Zone as the source resource, specify \n\t\t\t\t\tLOCAL_ZONE.

    \n
  • \n
  • \n

    To create snapshots on the same Outpost as the source resource, specify \n\t\t\t\t\tOUTPOST_LOCAL.

    \n
  • \n
\n

Default: CLOUD\n

" } }, "Interval": { @@ -330,7 +330,7 @@ "CronExpression": { "target": "com.amazonaws.dlm#CronExpression", "traits": { - "smithy.api#documentation": "

The schedule, as a Cron expression. The schedule interval must be between 1 hour and 1\n\t\t\tyear. For more information, see Cron\n\t\t\t\texpressions in the Amazon CloudWatch User Guide.

" + "smithy.api#documentation": "

The schedule, as a Cron expression. The schedule interval must be between 1 hour and 1\n\t\t\tyear. For more information, see the Cron expressions reference in \n\t\t\tthe Amazon EventBridge User Guide.

" } }, "Scripts": { @@ -1204,12 +1204,12 @@ "DefaultPolicy": { "target": "com.amazonaws.dlm#DefaultPolicy", "traits": { - "smithy.api#documentation": "

\n [Default policies only] The type of default policy. Values include:

\n
    \n
  • \n

    \n VOLUME - Default policy for EBS snapshots

    \n
  • \n
  • \n

    \n INSTANCE - Default policy for EBS-backed AMIs

    \n
  • \n
" + "smithy.api#documentation": "

Indicates whether the policy is a default lifecycle policy or a custom \n\t\t\tlifecycle policy.

\n
    \n
  • \n

    \n true - the policy is a default policy.

    \n
  • \n
  • \n

    \n false - the policy is a custom policy.

    \n
  • \n
" } } }, "traits": { - "smithy.api#documentation": "

\n [Custom policies only] Detailed information about a snapshot, AMI, or event-based lifecycle policy.

" + "smithy.api#documentation": "

Information about a lifecycle policy.

" } }, "com.amazonaws.dlm#LifecyclePolicySummary": { @@ -1356,6 +1356,12 @@ "traits": { "smithy.api#enumValue": "OUTPOST_LOCAL" } + }, + "LOCAL_ZONE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "LOCAL_ZONE" + } } } }, @@ -1423,7 +1429,7 @@ "PolicyType": { "target": "com.amazonaws.dlm#PolicyTypeValues", "traits": { - "smithy.api#documentation": "

\n [Custom policies only] The valid target resource types and actions a policy can manage. Specify EBS_SNAPSHOT_MANAGEMENT \n\t\t\tto create a lifecycle policy that manages the lifecycle of Amazon EBS snapshots. Specify IMAGE_MANAGEMENT \n\t\t\tto create a lifecycle policy that manages the lifecycle of EBS-backed AMIs. Specify EVENT_BASED_POLICY \n\t\t\tto create an event-based policy that performs specific actions when a defined event occurs in your Amazon Web Services account.

\n

The default is EBS_SNAPSHOT_MANAGEMENT.

" + "smithy.api#documentation": "

The type of policy. Specify EBS_SNAPSHOT_MANAGEMENT \n\t\t\tto create a lifecycle policy that manages the lifecycle of Amazon EBS snapshots. Specify IMAGE_MANAGEMENT \n\t\t\tto create a lifecycle policy that manages the lifecycle of EBS-backed AMIs. Specify EVENT_BASED_POLICY \n\t\t\tto create an event-based policy that performs specific actions when a defined event occurs in your Amazon Web Services account.

\n

The default is EBS_SNAPSHOT_MANAGEMENT.

" } }, "ResourceTypes": { @@ -1435,7 +1441,7 @@ "ResourceLocations": { "target": "com.amazonaws.dlm#ResourceLocationList", "traits": { - "smithy.api#documentation": "

\n [Custom snapshot and AMI policies only] The location of the resources to backup. If the source resources are located in an \n\t\t\tAmazon Web Services Region, specify CLOUD. If the source resources are located on an Outpost \n\t\t\tin your account, specify OUTPOST.

\n

If you specify OUTPOST, Amazon Data Lifecycle Manager backs up all resources \n\t\t\t\tof the specified type with matching target tags across all of the Outposts in your account.

" + "smithy.api#documentation": "

\n [Custom snapshot and AMI policies only] The location of the resources to backup.

\n
    \n
  • \n

    If the source resources are located in a Region, specify CLOUD. In this case, \n\t\t\t\t\tthe policy targets all resources of the specified type with matching target tags across all \n\t\t\t\t\tAvailability Zones in the Region.

    \n
  • \n
  • \n

    \n [Custom snapshot policies only] If the source resources are located in a Local Zone, specify LOCAL_ZONE. \n\t\t\t\t\tIn this case, the policy targets all resources of the specified type with matching target \n\t\t\t\t\ttags across all Local Zones in the Region.

    \n
  • \n
  • \n

    If the source resources are located on an Outpost in your account, specify OUTPOST. \n\t\t\t\t\tIn this case, the policy targets all resources of the specified type with matching target \n\t\t\t\t\ttags across all of the Outposts in your account.

    \n
  • \n
\n

" } }, "TargetTags": { @@ -1603,6 +1609,12 @@ "traits": { "smithy.api#enumValue": "OUTPOST" } + }, + "LOCAL_ZONE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "LOCAL_ZONE" + } } } }, @@ -1800,7 +1812,7 @@ "CrossRegionCopyRules": { "target": "com.amazonaws.dlm#CrossRegionCopyRules", "traits": { - "smithy.api#documentation": "

Specifies a rule for copying snapshots or AMIs across regions.

\n \n

You can't specify cross-Region copy rules for policies that create snapshots on an Outpost. \n\t\t\tIf the policy creates snapshots in a Region, then snapshots can be copied to up to three \n\t\t\tRegions or Outposts.

\n
" + "smithy.api#documentation": "

Specifies a rule for copying snapshots or AMIs across Regions.

\n \n

You can't specify cross-Region copy rules for policies that create snapshots on an \n\t\t\t\tOutpost or in a Local Zone. If the policy creates snapshots in a Region, then snapshots \n\t\t\t\tcan be copied to up to three Regions or Outposts.

\n
" } }, "ShareRules": { diff --git a/models/dsql.json b/models/dsql.json index 0807319d06..ac03126cd0 100644 --- a/models/dsql.json +++ b/models/dsql.json @@ -385,8 +385,8 @@ }, "output": { "linkedClusterArns": [ - "arn:aws:xanadu:us-east-1:111122223333:cluster/abcdefghijklmnopqrst12345", - "arn:aws:xanadu:us-east-2:111122223333:cluster/klmnopqrstuvwxyzabcd54321" + "arn:aws:dsql:us-east-1:111122223333:cluster/abcdefghijklmnopqrst12345", + "arn:aws:dsql:us-east-2:111122223333:cluster/klmnopqrstuvwxyzabcd54321" ] } } @@ -973,8 +973,8 @@ "title": "Delete Multi Region Clusters", "input": { "linkedClusterArns": [ - "arn:aws:xanadu:us-east-1:111122223333:cluster/abcdefghijklmnopqrst12345", - "arn:aws:xanadu:us-east-2:111122223333:cluster/klmnopqrstuvwxyzabcd54321" + "arn:aws:dsql:us-east-1:111122223333:cluster/abcdefghijklmnopqrst12345", + "arn:aws:dsql:us-east-2:111122223333:cluster/klmnopqrstuvwxyzabcd54321" ] } } diff --git a/models/ec2.json b/models/ec2.json index c84199fffc..336ecc3308 100644 --- a/models/ec2.json +++ b/models/ec2.json @@ -3863,6 +3863,9 @@ { "target": "com.amazonaws.ec2#ModifyInstanceMetadataOptions" }, + { + "target": "com.amazonaws.ec2#ModifyInstanceNetworkPerformanceOptions" + }, { "target": "com.amazonaws.ec2#ModifyInstancePlacement" }, @@ -9330,6 +9333,38 @@ } } }, + "com.amazonaws.ec2#BandwidthWeightingType": { + "type": "enum", + "members": { + "DEFAULT": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "default" + } + }, + "VPC_1": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "vpc-1" + } + }, + "EBS_1": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ebs-1" + } + } + } + }, + "com.amazonaws.ec2#BandwidthWeightingTypeList": { + "type": "list", + "member": { + "target": "com.amazonaws.ec2#BandwidthWeightingType", + "traits": { + "smithy.api#xmlName": "item" + } + } + }, "com.amazonaws.ec2#BareMetal": { "type": "enum", "members": { @@ -19770,7 +19805,7 @@ "target": 
"com.amazonaws.ec2#Snapshot" }, "traits": { - "smithy.api#documentation": "

Creates a snapshot of an EBS volume and stores it in Amazon S3. You can use snapshots for\n \tbackups, to make copies of EBS volumes, and to save data before shutting down an\n \tinstance.

\n

You can create snapshots of volumes in a Region and volumes on an Outpost. If you \n \tcreate a snapshot of a volume in a Region, the snapshot must be stored in the same \n \tRegion as the volume. If you create a snapshot of a volume on an Outpost, the snapshot \n \tcan be stored on the same Outpost as the volume, or in the Region for that Outpost.

\n

When a snapshot is created, any Amazon Web Services Marketplace product codes that are associated with the\n source volume are propagated to the snapshot.

\n

You can take a snapshot of an attached volume that is in use. However, snapshots only\n capture data that has been written to your Amazon EBS volume at the time the snapshot command is\n issued; this might exclude any data that has been cached by any applications or the operating\n system. If you can pause any file systems on the volume long enough to take a snapshot, your\n snapshot should be complete. However, if you cannot pause all file writes to the volume, you\n should unmount the volume from within the instance, issue the snapshot command, and then\n remount the volume to ensure a consistent and complete snapshot. You may remount and use your\n volume while the snapshot status is pending.

\n

When you create a snapshot for an EBS volume that serves as a root device, we recommend \n that you stop the instance before taking the snapshot.

\n

Snapshots that are taken from encrypted volumes are automatically encrypted. Volumes that\n are created from encrypted snapshots are also automatically encrypted. Your encrypted volumes\n and any associated snapshots always remain protected.

\n

You can tag your snapshots during creation. For more information, see Tag your Amazon EC2\n resources in the Amazon EC2 User Guide.

\n

For more information, see Amazon EBS and Amazon EBS encryption in the Amazon EBS User Guide.

", + "smithy.api#documentation": "

Creates a snapshot of an EBS volume and stores it in Amazon S3. You can use snapshots for\n \tbackups, to make copies of EBS volumes, and to save data before shutting down an\n \tinstance.

\n

The location of the source EBS volume determines where you can create the snapshot.

\n
    \n
  • \n

    If the source volume is in a Region, you must create the snapshot in the same \n Region as the volume.

    \n
  • \n
  • \n

    If the source volume is in a Local Zone, you can create the snapshot in the same \n Local Zone or in its parent Amazon Web Services Region.

    \n
  • \n
  • \n

    If the source volume is on an Outpost, you can create the snapshot on the same \n Outpost or in its parent Amazon Web Services Region.

    \n
  • \n
\n

When a snapshot is created, any Amazon Web Services Marketplace product codes that are associated with the\n source volume are propagated to the snapshot.

\n

You can take a snapshot of an attached volume that is in use. However, snapshots only\n capture data that has been written to your Amazon EBS volume at the time the snapshot command is\n issued; this might exclude any data that has been cached by any applications or the operating\n system. If you can pause any file systems on the volume long enough to take a snapshot, your\n snapshot should be complete. However, if you cannot pause all file writes to the volume, you\n should unmount the volume from within the instance, issue the snapshot command, and then\n remount the volume to ensure a consistent and complete snapshot. You may remount and use your\n volume while the snapshot status is pending.

\n

When you create a snapshot for an EBS volume that serves as a root device, we recommend \n that you stop the instance before taking the snapshot.

\n

Snapshots that are taken from encrypted volumes are automatically encrypted. Volumes that\n are created from encrypted snapshots are also automatically encrypted. Your encrypted volumes\n and any associated snapshots always remain protected. For more information, see \n Amazon EBS encryption \n in the Amazon EBS User Guide.

", "smithy.api#examples": [ { "title": "To create a snapshot", @@ -19805,7 +19840,7 @@ "OutpostArn": { "target": "com.amazonaws.ec2#String", "traits": { - "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the Outpost on which to create a local \n \tsnapshot.

\n
    \n
  • \n

    To create a snapshot of a volume in a Region, omit this parameter. The snapshot \n \t\t\t\tis created in the same Region as the volume.

    \n
  • \n
  • \n

    To create a snapshot of a volume on an Outpost and store the snapshot in the \n \t\t\t\tRegion, omit this parameter. The snapshot is created in the Region for the \n \t\t\t\tOutpost.

    \n
  • \n
  • \n

    To create a snapshot of a volume on an Outpost and store the snapshot on an \n \t\t\tOutpost, specify the ARN of the destination Outpost. The snapshot must be created on \n \t\t\tthe same Outpost as the volume.

    \n
  • \n
\n

For more information, see Create local snapshots from volumes on an Outpost in the Amazon EBS User Guide.

" + "smithy.api#documentation": "\n

Only supported for volumes on Outposts. If the source volume is not on an Outpost, \n omit this parameter.

\n
\n
    \n
  • \n

    To create the snapshot on the same Outpost as the source volume, specify the \n ARN of that Outpost. The snapshot must be created on the same Outpost as the volume.

    \n
  • \n
  • \n

    To create the snapshot in the parent Region of the Outpost, omit this parameter.

    \n
  • \n
\n

For more information, see Create local snapshots from volumes on an Outpost in the Amazon EBS User Guide.

" } }, "VolumeId": { @@ -19823,6 +19858,12 @@ "smithy.api#xmlName": "TagSpecification" } }, + "Location": { + "target": "com.amazonaws.ec2#SnapshotLocationEnum", + "traits": { + "smithy.api#documentation": "\n

Only supported for volumes in Local Zones. If the source volume is not in a Local Zone, \n omit this parameter.

\n
\n
    \n
  • \n

    To create a local snapshot in the same Local Zone as the source volume, specify \n local.

    \n
  • \n
  • \n

    To create a regional snapshot in the parent Region of the Local Zone, specify \n regional or omit this parameter.

    \n
  • \n
\n

Default value: regional\n

" + } + }, "DryRun": { "target": "com.amazonaws.ec2#Boolean", "traits": { @@ -19845,7 +19886,7 @@ "target": "com.amazonaws.ec2#CreateSnapshotsResult" }, "traits": { - "smithy.api#documentation": "

Creates crash-consistent snapshots of multiple EBS volumes and stores the data in S3.\n Volumes are chosen by specifying an instance. Any attached volumes will produce one snapshot\n each that is crash-consistent across the instance.

\n

You can include all of the volumes currently attached to the instance, or you can exclude \n the root volume or specific data (non-root) volumes from the multi-volume snapshot set.

\n

You can create multi-volume snapshots of instances in a Region and instances on an \n \tOutpost. If you create snapshots from an instance in a Region, the snapshots must be stored \n \tin the same Region as the instance. If you create snapshots from an instance on an Outpost, \n \tthe snapshots can be stored on the same Outpost as the instance, or in the Region for that \n \tOutpost.

" + "smithy.api#documentation": "

Creates crash-consistent snapshots of multiple EBS volumes attached to an Amazon EC2 instance.\n Volumes are chosen by specifying an instance. Each volume attached to the specified instance \n will produce one snapshot that is crash-consistent across the instance. You can include all of \n the volumes currently attached to the instance, or you can exclude the root volume or specific \n data (non-root) volumes from the multi-volume snapshot set.

\n

The location of the source instance determines where you can create the snapshots.

\n
    \n
  • \n

    If the source instance is in a Region, you must create the snapshots in the same \n Region as the instance.

    \n
  • \n
  • \n

    If the source instance is in a Local Zone, you can create the snapshots in the same \n Local Zone or in its parent Amazon Web Services Region.

    \n
  • \n
  • \n

    If the source instance is on an Outpost, you can create the snapshots on the same \n Outpost or in its parent Amazon Web Services Region.

    \n
  • \n
" } }, "com.amazonaws.ec2#CreateSnapshotsRequest": { @@ -19868,7 +19909,7 @@ "OutpostArn": { "target": "com.amazonaws.ec2#String", "traits": { - "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the Outpost on which to create the local \n \t\tsnapshots.

\n
    \n
  • \n

    To create snapshots from an instance in a Region, omit this parameter. The \n \t\t\t\tsnapshots are created in the same Region as the instance.

    \n
  • \n
  • \n

    To create snapshots from an instance on an Outpost and store the snapshots \n \t\t\t\tin the Region, omit this parameter. The snapshots are created in the Region \n \t\t\t\tfor the Outpost.

    \n
  • \n
  • \n

    To create snapshots from an instance on an Outpost and store the snapshots \n \t\t\t\ton an Outpost, specify the ARN of the destination Outpost. The snapshots must \n \t\t\t\tbe created on the same Outpost as the instance.

    \n
  • \n
\n

For more information, see \n \t\tCreate multi-volume local snapshots from instances on an Outpost in the \n \t\tAmazon EBS User Guide.

" + "smithy.api#documentation": "\n

Only supported for instances on Outposts. If the source instance is not on an Outpost, \n omit this parameter.

\n
\n
    \n
  • \n

    To create the snapshots on the same Outpost as the source instance, specify the \n ARN of that Outpost. The snapshots must be created on the same Outpost as the instance.

    \n
  • \n
  • \n

    To create the snapshots in the parent Region of the Outpost, omit this parameter.

    \n
  • \n
\n

For more information, see \n Create local snapshots from volumes on an Outpost in the Amazon EBS User Guide.

" } }, "TagSpecifications": { @@ -19889,6 +19930,12 @@ "traits": { "smithy.api#documentation": "

Copies the tags from the specified volume to corresponding snapshot.

" } + }, + "Location": { + "target": "com.amazonaws.ec2#SnapshotLocationEnum", + "traits": { + "smithy.api#documentation": "\n

Only supported for instances in Local Zones. If the source instance is not in a Local Zone, \n omit this parameter.

\n
\n
    \n
  • \n

    To create local snapshots in the same Local Zone as the source instance, specify \n local.

    \n
  • \n
  • \n

    To create regional snapshots in the parent Region of the Local Zone, specify \n regional or omit this parameter.

    \n
  • \n
\n

Default value: regional\n

" + } } }, "traits": { @@ -26436,7 +26483,7 @@ "target": "com.amazonaws.ec2#DeleteSecurityGroupRequest" }, "output": { - "target": "smithy.api#Unit" + "target": "com.amazonaws.ec2#DeleteSecurityGroupResult" }, "traits": { "smithy.api#documentation": "

Deletes a security group.

\n

If you attempt to delete a security group that is associated with an instance or network interface, is\n\t\t\t referenced by another security group in the same VPC, or has a VPC association, the operation fails with\n\t\t\t\tDependencyViolation.

", @@ -26480,6 +26527,30 @@ "smithy.api#input": {} } }, + "com.amazonaws.ec2#DeleteSecurityGroupResult": { + "type": "structure", + "members": { + "Return": { + "target": "com.amazonaws.ec2#Boolean", + "traits": { + "aws.protocols#ec2QueryName": "Return", + "smithy.api#documentation": "

Returns true if the request succeeds; otherwise, returns an error.

", + "smithy.api#xmlName": "return" + } + }, + "GroupId": { + "target": "com.amazonaws.ec2#SecurityGroupId", + "traits": { + "aws.protocols#ec2QueryName": "GroupId", + "smithy.api#documentation": "

The ID of the deleted security group.

", + "smithy.api#xmlName": "groupId" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.ec2#DeleteSnapshot": { "type": "operation", "input": { @@ -34090,7 +34161,7 @@ "target": "com.amazonaws.ec2#DescribeInstanceTopologyResult" }, "traits": { - "smithy.api#documentation": "

Describes a tree-based hierarchy that represents the physical host placement of your\n EC2 instances within an Availability Zone or Local Zone. You can use this information to\n determine the relative proximity of your EC2 instances within the Amazon Web Services network to\n support your tightly coupled workloads.

\n

\n Limitations\n

\n
    \n
  • \n

    Supported zones

    \n
      \n
    • \n

      Availability Zone

      \n
    • \n
    • \n

      Local Zone

      \n
    • \n
    \n
  • \n
  • \n

    Supported instance types

    \n
      \n
    • \n

      \n hpc6a.48xlarge | hpc6id.32xlarge |\n hpc7a.12xlarge | hpc7a.24xlarge |\n hpc7a.48xlarge | hpc7a.96xlarge |\n hpc7g.4xlarge | hpc7g.8xlarge |\n hpc7g.16xlarge\n

      \n
    • \n
    • \n

      \n p3dn.24xlarge | p4d.24xlarge |\n p4de.24xlarge | p5.48xlarge |\n p5e.48xlarge\n

      \n
    • \n
    • \n

      \n trn1.2xlarge | trn1.32xlarge |\n trn1n.32xlarge\n

      \n
    • \n
    \n
  • \n
\n

For more information, see Amazon EC2 instance\n topology in the Amazon EC2 User Guide.

", + "smithy.api#documentation": "

Describes a tree-based hierarchy that represents the physical host placement of your\n EC2 instances within an Availability Zone or Local Zone. You can use this information to\n determine the relative proximity of your EC2 instances within the Amazon Web Services network to\n support your tightly coupled workloads.

\n

\n Limitations\n

\n
    \n
  • \n

    Supported zones

    \n
      \n
    • \n

      Availability Zone

      \n
    • \n
    • \n

      Local Zone

      \n
    • \n
    \n
  • \n
  • \n

    Supported instance types

    \n
      \n
    • \n

      \n hpc6a.48xlarge | hpc6id.32xlarge |\n hpc7a.12xlarge | hpc7a.24xlarge |\n hpc7a.48xlarge | hpc7a.96xlarge |\n hpc7g.4xlarge | hpc7g.8xlarge |\n hpc7g.16xlarge\n

      \n
    • \n
    • \n

      \n p3dn.24xlarge | p4d.24xlarge |\n p4de.24xlarge | p5.48xlarge |\n p5e.48xlarge | p5en.48xlarge\n

      \n
    • \n
    • \n

      \n trn1.2xlarge | trn1.32xlarge |\n trn1n.32xlarge\n

      \n
    • \n
    \n
  • \n
\n

For more information, see Amazon EC2 instance\n topology in the Amazon EC2 User Guide.

", "smithy.api#paginated": { "inputToken": "NextToken", "outputToken": "NextToken", @@ -34309,7 +34380,7 @@ "Filters": { "target": "com.amazonaws.ec2#FilterList", "traits": { - "smithy.api#documentation": "

One or more filters. Filter names and values are case-sensitive.

\n
    \n
  • \n

    \n auto-recovery-supported - Indicates whether Amazon CloudWatch action\n based recovery is supported (true | false).

    \n
  • \n
  • \n

    \n bare-metal - Indicates whether it is a bare metal instance type\n (true | false).

    \n
  • \n
  • \n

    \n burstable-performance-supported - Indicates whether the instance type is a\n burstable performance T instance type (true | false).

    \n
  • \n
  • \n

    \n current-generation - Indicates whether this instance type is the latest\n generation instance type of an instance family (true | false).

    \n
  • \n
  • \n

    \n ebs-info.ebs-optimized-info.baseline-bandwidth-in-mbps - The baseline\n bandwidth performance for an EBS-optimized instance type, in Mbps.

    \n
  • \n
  • \n

    \n ebs-info.ebs-optimized-info.baseline-iops - The baseline input/output storage\n operations per second for an EBS-optimized instance type.

    \n
  • \n
  • \n

    \n ebs-info.ebs-optimized-info.baseline-throughput-in-mbps - The baseline\n throughput performance for an EBS-optimized instance type, in MB/s.

    \n
  • \n
  • \n

    \n ebs-info.ebs-optimized-info.maximum-bandwidth-in-mbps - The maximum bandwidth\n performance for an EBS-optimized instance type, in Mbps.

    \n
  • \n
  • \n

    \n ebs-info.ebs-optimized-info.maximum-iops - The maximum input/output storage\n operations per second for an EBS-optimized instance type.

    \n
  • \n
  • \n

    \n ebs-info.ebs-optimized-info.maximum-throughput-in-mbps - The maximum\n throughput performance for an EBS-optimized instance type, in MB/s.

    \n
  • \n
  • \n

    \n ebs-info.ebs-optimized-support - Indicates whether the instance type is\n EBS-optimized (supported | unsupported |\n default).

    \n
  • \n
  • \n

    \n ebs-info.encryption-support - Indicates whether EBS encryption is supported\n (supported | unsupported).

    \n
  • \n
  • \n

    \n ebs-info.nvme-support - Indicates whether non-volatile memory express (NVMe)\n is supported for EBS volumes (required | supported |\n unsupported).

    \n
  • \n
  • \n

    \n free-tier-eligible - Indicates whether the instance type is eligible to use\n in the free tier (true | false).

    \n
  • \n
  • \n

    \n hibernation-supported - Indicates whether On-Demand hibernation is supported\n (true | false).

    \n
  • \n
  • \n

    \n hypervisor - The hypervisor (nitro | xen).

    \n
  • \n
  • \n

    \n instance-storage-info.disk.count - The number of local disks.

    \n
  • \n
  • \n

    \n instance-storage-info.disk.size-in-gb - The storage size of each instance\n storage disk, in GB.

    \n
  • \n
  • \n

    \n instance-storage-info.disk.type - The storage technology for the local\n instance storage disks (hdd | ssd).

    \n
  • \n
  • \n

    \n instance-storage-info.encryption-support - Indicates whether data is\n encrypted at rest (required | supported |\n unsupported).

    \n
  • \n
  • \n

    \n instance-storage-info.nvme-support - Indicates whether non-volatile memory\n express (NVMe) is supported for instance store (required | supported\n | unsupported).

    \n
  • \n
  • \n

    \n instance-storage-info.total-size-in-gb - The total amount of storage\n available from all local instance storage, in GB.

    \n
  • \n
  • \n

    \n instance-storage-supported - Indicates whether the instance type has local\n instance storage (true | false).

    \n
  • \n
  • \n

    \n instance-type - The instance type (for example c5.2xlarge or\n c5*).

    \n
  • \n
  • \n

    \n memory-info.size-in-mib - The memory size.

    \n
  • \n
  • \n

    \n network-info.efa-info.maximum-efa-interfaces - The maximum number of Elastic\n Fabric Adapters (EFAs) per instance.

    \n
  • \n
  • \n

    \n network-info.efa-supported - Indicates whether the instance type supports\n Elastic Fabric Adapter (EFA) (true | false).

    \n
  • \n
  • \n

    \n network-info.ena-support - Indicates whether Elastic Network Adapter (ENA) is\n supported or required (required | supported |\n unsupported).

    \n
  • \n
  • \n

    \n network-info.encryption-in-transit-supported - Indicates whether the instance\n type automatically encrypts in-transit traffic between instances (true | false).

    \n
  • \n
  • \n

    \n network-info.ipv4-addresses-per-interface - The maximum number of private\n IPv4 addresses per network interface.

    \n
  • \n
  • \n

    \n network-info.ipv6-addresses-per-interface - The maximum number of private\n IPv6 addresses per network interface.

    \n
  • \n
  • \n

    \n network-info.ipv6-supported - Indicates whether the instance type supports\n IPv6 (true | false).

    \n
  • \n
  • \n

    \n network-info.maximum-network-cards - The maximum number of network cards per\n instance.

    \n
  • \n
  • \n

    \n network-info.maximum-network-interfaces - The maximum number of network\n interfaces per instance.

    \n
  • \n
  • \n

    \n network-info.network-performance - The network performance (for example, \"25\n Gigabit\").

    \n
  • \n
  • \n

    \n nitro-enclaves-support - Indicates whether Nitro Enclaves is supported\n (supported | unsupported).

    \n
  • \n
  • \n

    \n nitro-tpm-support - Indicates whether NitroTPM is supported\n (supported | unsupported).

    \n
  • \n
  • \n

    \n nitro-tpm-info.supported-versions - The supported NitroTPM version\n (2.0).

    \n
  • \n
  • \n

    \n processor-info.supported-architecture - The CPU architecture\n (arm64 | i386 | x86_64).

    \n
  • \n
  • \n

    \n processor-info.sustained-clock-speed-in-ghz - The CPU clock speed, in\n GHz.

    \n
  • \n
  • \n

    \n processor-info.supported-features - The supported CPU features\n (amd-sev-snp).

    \n
  • \n
  • \n

    \n supported-boot-mode - The boot mode (legacy-bios |\n uefi).

    \n
  • \n
  • \n

    \n supported-root-device-type - The root device type (ebs |\n instance-store).

    \n
  • \n
  • \n

    \n supported-usage-class - The usage class (on-demand | spot | \n capacity-block).

    \n
  • \n
  • \n

    \n supported-virtualization-type - The virtualization type (hvm |\n paravirtual).

    \n
  • \n
  • \n

    \n vcpu-info.default-cores - The default number of cores for the instance\n type.

    \n
  • \n
  • \n

    \n vcpu-info.default-threads-per-core - The default number of threads per core\n for the instance type.

    \n
  • \n
  • \n

    \n vcpu-info.default-vcpus - The default number of vCPUs for the instance\n type.

    \n
  • \n
  • \n

    \n vcpu-info.valid-cores - The number of cores that can be configured for the\n instance type.

    \n
  • \n
  • \n

    \n vcpu-info.valid-threads-per-core - The number of threads per core that can be\n configured for the instance type. For example, \"1\" or \"1,2\".

    \n
  • \n
", + "smithy.api#documentation": "

One or more filters. Filter names and values are case-sensitive.

\n
    \n
  • \n

    \n auto-recovery-supported - Indicates whether Amazon CloudWatch action\n based recovery is supported (true | false).

    \n
  • \n
  • \n

    \n bare-metal - Indicates whether it is a bare metal instance type\n (true | false).

    \n
  • \n
  • \n

    \n burstable-performance-supported - Indicates whether the instance type is a\n burstable performance T instance type (true | false).

    \n
  • \n
  • \n

    \n current-generation - Indicates whether this instance type is the latest\n generation instance type of an instance family (true | false).

    \n
  • \n
  • \n

    \n ebs-info.ebs-optimized-info.baseline-bandwidth-in-mbps - The baseline\n bandwidth performance for an EBS-optimized instance type, in Mbps.

    \n
  • \n
  • \n

    \n ebs-info.ebs-optimized-info.baseline-iops - The baseline input/output storage\n operations per second for an EBS-optimized instance type.

    \n
  • \n
  • \n

    \n ebs-info.ebs-optimized-info.baseline-throughput-in-mbps - The baseline\n throughput performance for an EBS-optimized instance type, in MB/s.

    \n
  • \n
  • \n

    \n ebs-info.ebs-optimized-info.maximum-bandwidth-in-mbps - The maximum bandwidth\n performance for an EBS-optimized instance type, in Mbps.

    \n
  • \n
  • \n

    \n ebs-info.ebs-optimized-info.maximum-iops - The maximum input/output storage\n operations per second for an EBS-optimized instance type.

    \n
  • \n
  • \n

    \n ebs-info.ebs-optimized-info.maximum-throughput-in-mbps - The maximum\n throughput performance for an EBS-optimized instance type, in MB/s.

    \n
  • \n
  • \n

    \n ebs-info.ebs-optimized-support - Indicates whether the instance type is\n EBS-optimized (supported | unsupported |\n default).

    \n
  • \n
  • \n

    \n ebs-info.encryption-support - Indicates whether EBS encryption is supported\n (supported | unsupported).

    \n
  • \n
  • \n

    \n ebs-info.nvme-support - Indicates whether non-volatile memory express (NVMe)\n is supported for EBS volumes (required | supported |\n unsupported).

    \n
  • \n
  • \n

    \n free-tier-eligible - Indicates whether the instance type is eligible to use\n in the free tier (true | false).

    \n
  • \n
  • \n

    \n hibernation-supported - Indicates whether On-Demand hibernation is supported\n (true | false).

    \n
  • \n
  • \n

    \n hypervisor - The hypervisor (nitro | xen).

    \n
  • \n
  • \n

    \n instance-storage-info.disk.count - The number of local disks.

    \n
  • \n
  • \n

    \n instance-storage-info.disk.size-in-gb - The storage size of each instance\n storage disk, in GB.

    \n
  • \n
  • \n

    \n instance-storage-info.disk.type - The storage technology for the local\n instance storage disks (hdd | ssd).

    \n
  • \n
  • \n

    \n instance-storage-info.encryption-support - Indicates whether data is\n encrypted at rest (required | supported |\n unsupported).

    \n
  • \n
  • \n

    \n instance-storage-info.nvme-support - Indicates whether non-volatile memory\n express (NVMe) is supported for instance store (required | supported\n | unsupported).

    \n
  • \n
  • \n

    \n instance-storage-info.total-size-in-gb - The total amount of storage\n available from all local instance storage, in GB.

    \n
  • \n
  • \n

    \n instance-storage-supported - Indicates whether the instance type has local\n instance storage (true | false).

    \n
  • \n
  • \n

    \n instance-type - The instance type (for example c5.2xlarge or\n c5*).

    \n
  • \n
  • \n

    \n memory-info.size-in-mib - The memory size.

    \n
  • \n
  • \n

    \n network-info.bandwidth-weightings - For instances that support bandwidth \n weighting to boost performance (default, vpc-1, ebs-1).

    \n
  • \n
  • \n

    \n network-info.efa-info.maximum-efa-interfaces - The maximum number of Elastic\n Fabric Adapters (EFAs) per instance.

    \n
  • \n
  • \n

    \n network-info.efa-supported - Indicates whether the instance type supports\n Elastic Fabric Adapter (EFA) (true | false).

    \n
  • \n
  • \n

    \n network-info.ena-support - Indicates whether Elastic Network Adapter (ENA) is\n supported or required (required | supported |\n unsupported).

    \n
  • \n
  • \n

    \n network-info.encryption-in-transit-supported - Indicates whether the instance\n type automatically encrypts in-transit traffic between instances (true | false).

    \n
  • \n
  • \n

    \n network-info.ipv4-addresses-per-interface - The maximum number of private\n IPv4 addresses per network interface.

    \n
  • \n
  • \n

    \n network-info.ipv6-addresses-per-interface - The maximum number of private\n IPv6 addresses per network interface.

    \n
  • \n
  • \n

    \n network-info.ipv6-supported - Indicates whether the instance type supports\n IPv6 (true | false).

    \n
  • \n
  • \n

    \n network-info.maximum-network-cards - The maximum number of network cards per\n instance.

    \n
  • \n
  • \n

    \n network-info.maximum-network-interfaces - The maximum number of network\n interfaces per instance.

    \n
  • \n
  • \n

    \n network-info.network-performance - The network performance (for example, \"25\n Gigabit\").

    \n
  • \n
  • \n

    \n nitro-enclaves-support - Indicates whether Nitro Enclaves is supported\n (supported | unsupported).

    \n
  • \n
  • \n

    \n nitro-tpm-support - Indicates whether NitroTPM is supported\n (supported | unsupported).

    \n
  • \n
  • \n

    \n nitro-tpm-info.supported-versions - The supported NitroTPM version\n (2.0).

    \n
  • \n
  • \n

    \n processor-info.supported-architecture - The CPU architecture\n (arm64 | i386 | x86_64).

    \n
  • \n
  • \n

    \n processor-info.sustained-clock-speed-in-ghz - The CPU clock speed, in\n GHz.

    \n
  • \n
  • \n

    \n processor-info.supported-features - The supported CPU features\n (amd-sev-snp).

    \n
  • \n
  • \n

    \n supported-boot-mode - The boot mode (legacy-bios |\n uefi).

    \n
  • \n
  • \n

    \n supported-root-device-type - The root device type (ebs |\n instance-store).

    \n
  • \n
  • \n

    \n supported-usage-class - The usage class (on-demand | spot | \n capacity-block).

    \n
  • \n
  • \n

    \n supported-virtualization-type - The virtualization type (hvm |\n paravirtual).

    \n
  • \n
  • \n

    \n vcpu-info.default-cores - The default number of cores for the instance\n type.

    \n
  • \n
  • \n

    \n vcpu-info.default-threads-per-core - The default number of threads per core\n for the instance type.

    \n
  • \n
  • \n

    \n vcpu-info.default-vcpus - The default number of vCPUs for the instance\n type.

    \n
  • \n
  • \n

    \n vcpu-info.valid-cores - The number of cores that can be configured for the\n instance type.

    \n
  • \n
  • \n

    \n vcpu-info.valid-threads-per-core - The number of threads per core that can be\n configured for the instance type. For example, \"1\" or \"1,2\".

    \n
  • \n
", "smithy.api#xmlName": "Filter" } }, @@ -34599,7 +34670,7 @@ "Filters": { "target": "com.amazonaws.ec2#FilterList", "traits": { - "smithy.api#documentation": "

The filters.

\n
    \n
  • \n

    \n affinity - The affinity setting for an instance running on a\n Dedicated Host (default | host).

    \n
  • \n
  • \n

    \n architecture - The instance architecture (i386 |\n x86_64 | arm64).

    \n
  • \n
  • \n

    \n availability-zone - The Availability Zone of the instance.

    \n
  • \n
  • \n

    \n block-device-mapping.attach-time - The attach time for an EBS\n volume mapped to the instance, for example,\n 2022-09-15T17:15:20.000Z.

    \n
  • \n
  • \n

    \n block-device-mapping.delete-on-termination - A Boolean that\n indicates whether the EBS volume is deleted on instance termination.

    \n
  • \n
  • \n

    \n block-device-mapping.device-name - The device name specified in\n the block device mapping (for example, /dev/sdh or\n xvdh).

    \n
  • \n
  • \n

    \n block-device-mapping.status - The status for the EBS volume\n (attaching | attached | detaching |\n detached).

    \n
  • \n
  • \n

    \n block-device-mapping.volume-id - The volume ID of the EBS\n volume.

    \n
  • \n
  • \n

    \n boot-mode - The boot mode that was specified by the AMI\n (legacy-bios | uefi |\n uefi-preferred).

    \n
  • \n
  • \n

    \n capacity-reservation-id - The ID of the Capacity Reservation into which the\n instance was launched.

    \n
  • \n
  • \n

    \n capacity-reservation-specification.capacity-reservation-preference\n - The instance's Capacity Reservation preference (open | none).

    \n
  • \n
  • \n

    \n capacity-reservation-specification.capacity-reservation-target.capacity-reservation-id\n - The ID of the targeted Capacity Reservation.

    \n
  • \n
  • \n

    \n capacity-reservation-specification.capacity-reservation-target.capacity-reservation-resource-group-arn\n - The ARN of the targeted Capacity Reservation group.

    \n
  • \n
  • \n

    \n client-token - The idempotency token you provided when you\n launched the instance.

    \n
  • \n
  • \n

    \n current-instance-boot-mode - The boot mode that is used to launch\n the instance at launch or start (legacy-bios |\n uefi).

    \n
  • \n
  • \n

    \n dns-name - The public DNS name of the instance.

    \n
  • \n
  • \n

    \n ebs-optimized - A Boolean that indicates whether the instance is\n optimized for Amazon EBS I/O.

    \n
  • \n
  • \n

    \n ena-support - A Boolean that indicates whether the instance is\n enabled for enhanced networking with ENA.

    \n
  • \n
  • \n

    \n enclave-options.enabled - A Boolean that indicates whether the\n instance is enabled for Amazon Web Services Nitro Enclaves.

    \n
  • \n
  • \n

    \n hibernation-options.configured - A Boolean that indicates whether\n the instance is enabled for hibernation. A value of true means that\n the instance is enabled for hibernation.

    \n
  • \n
  • \n

    \n host-id - The ID of the Dedicated Host on which the instance is\n running, if applicable.

    \n
  • \n
  • \n

    \n hypervisor - The hypervisor type of the instance\n (ovm | xen). The value xen is used\n for both Xen and Nitro hypervisors.

    \n
  • \n
  • \n

    \n iam-instance-profile.arn - The instance profile associated with\n the instance. Specified as an ARN.

    \n
  • \n
  • \n

    \n iam-instance-profile.id - The instance profile associated with\n the instance. Specified as an ID.

    \n
  • \n
  • \n

    \n image-id - The ID of the image used to launch the\n instance.

    \n
  • \n
  • \n

    \n instance-id - The ID of the instance.

    \n
  • \n
  • \n

    \n instance-lifecycle - Indicates whether this is a Spot Instance, a Scheduled Instance, or\n a Capacity Block (spot | scheduled | capacity-block).

    \n
  • \n
  • \n

    \n instance-state-code - The state of the instance, as a 16-bit\n unsigned integer. The high byte is used for internal purposes and should be\n ignored. The low byte is set based on the state represented. The valid values\n are: 0 (pending), 16 (running), 32 (shutting-down), 48 (terminated), 64\n (stopping), and 80 (stopped).

    \n
  • \n
  • \n

    \n instance-state-name - The state of the instance\n (pending | running | shutting-down |\n terminated | stopping |\n stopped).

    \n
  • \n
  • \n

    \n instance-type - The type of instance (for example,\n t2.micro).

    \n
  • \n
  • \n

    \n instance.group-id - The ID of the security group for the\n instance.

    \n
  • \n
  • \n

    \n instance.group-name - The name of the security group for the\n instance.

    \n
  • \n
  • \n

    \n ip-address - The public IPv4 address of the instance.

    \n
  • \n
  • \n

    \n ipv6-address - The IPv6 address of the instance.

    \n
  • \n
  • \n

    \n kernel-id - The kernel ID.

    \n
  • \n
  • \n

    \n key-name - The name of the key pair used when the instance was\n launched.

    \n
  • \n
  • \n

    \n launch-index - When launching multiple instances, this is the\n index for the instance in the launch group (for example, 0, 1, 2, and so on).\n

    \n
  • \n
  • \n

    \n launch-time - The time when the instance was launched, in the ISO\n 8601 format in the UTC time zone (YYYY-MM-DDThh:mm:ss.sssZ), for example,\n 2021-09-29T11:04:43.305Z. You can use a wildcard\n (*), for example, 2021-09-29T*, which matches an\n entire day.

    \n
  • \n
  • \n

    \n maintenance-options.auto-recovery - The current automatic\n recovery behavior of the instance (disabled | default).

    \n
  • \n
  • \n

    \n metadata-options.http-endpoint - The status of access to the HTTP\n metadata endpoint on your instance (enabled |\n disabled)

    \n
  • \n
  • \n

    \n metadata-options.http-protocol-ipv4 - Indicates whether the IPv4\n endpoint is enabled (disabled | enabled).

    \n
  • \n
  • \n

    \n metadata-options.http-protocol-ipv6 - Indicates whether the IPv6\n endpoint is enabled (disabled | enabled).

    \n
  • \n
  • \n

    \n metadata-options.http-put-response-hop-limit - The HTTP metadata\n request put response hop limit (integer, possible values 1 to\n 64)

    \n
  • \n
  • \n

    \n metadata-options.http-tokens - The metadata request authorization\n state (optional | required)

    \n
  • \n
  • \n

    \n metadata-options.instance-metadata-tags - The status of access to\n instance tags from the instance metadata (enabled |\n disabled)

    \n
  • \n
  • \n

    \n metadata-options.state - The state of the metadata option changes\n (pending | applied).

    \n
  • \n
  • \n

    \n monitoring-state - Indicates whether detailed monitoring is\n enabled (disabled | enabled).

    \n
  • \n
  • \n

    \n network-interface.addresses.association.allocation-id - The allocation ID.

    \n
  • \n
  • \n

    \n network-interface.addresses.association.association-id - The association ID.

    \n
  • \n
  • \n

    \n network-interface.addresses.association.carrier-ip - The carrier IP address.

    \n
  • \n
  • \n

    \n network-interface.addresses.association.customer-owned-ip - The customer-owned IP address.

    \n
  • \n
  • \n

    \n network-interface.addresses.association.ip-owner-id - The owner\n ID of the private IPv4 address associated with the network interface.

    \n
  • \n
  • \n

    \n network-interface.addresses.association.public-dns-name - The public DNS name.

    \n
  • \n
  • \n

    \n network-interface.addresses.association.public-ip - The ID of the\n association of an Elastic IP address (IPv4) with a network interface.

    \n
  • \n
  • \n

    \n network-interface.addresses.primary - Specifies whether the IPv4\n address of the network interface is the primary private IPv4 address.

    \n
  • \n
  • \n

    \n network-interface.addresses.private-dns-name - The private DNS name.

    \n
  • \n
  • \n

    \n network-interface.addresses.private-ip-address - The private IPv4\n address associated with the network interface.

    \n
  • \n
  • \n

    \n network-interface.association.allocation-id - The allocation ID\n returned when you allocated the Elastic IP address (IPv4) for your network\n interface.

    \n
  • \n
  • \n

    \n network-interface.association.association-id - The association ID\n returned when the network interface was associated with an IPv4 address.

    \n
  • \n
  • \n

    \n network-interface.association.carrier-ip - The customer-owned IP address.

    \n
  • \n
  • \n

    \n network-interface.association.customer-owned-ip - The customer-owned IP address.

    \n
  • \n
  • \n

    \n network-interface.association.ip-owner-id - The owner of the\n Elastic IP address (IPv4) associated with the network interface.

    \n
  • \n
  • \n

    \n network-interface.association.public-dns-name - The public DNS name.

    \n
  • \n
  • \n

    \n network-interface.association.public-ip - The address of the\n Elastic IP address (IPv4) bound to the network interface.

    \n
  • \n
  • \n

    \n network-interface.attachment.attach-time - The time that the\n network interface was attached to an instance.

    \n
  • \n
  • \n

    \n network-interface.attachment.attachment-id - The ID of the\n interface attachment.

    \n
  • \n
  • \n

    \n network-interface.attachment.delete-on-termination - Specifies\n whether the attachment is deleted when an instance is terminated.

    \n
  • \n
  • \n

    \n network-interface.attachment.device-index - The device index to\n which the network interface is attached.

    \n
  • \n
  • \n

    \n network-interface.attachment.instance-id - The ID of the instance\n to which the network interface is attached.

    \n
  • \n
  • \n

    \n network-interface.attachment.instance-owner-id - The owner ID of\n the instance to which the network interface is attached.

    \n
  • \n
  • \n

    \n network-interface.attachment.network-card-index - The index of the network card.

    \n
  • \n
  • \n

    \n network-interface.attachment.status - The status of the\n attachment (attaching | attached |\n detaching | detached).

    \n
  • \n
  • \n

    \n network-interface.availability-zone - The Availability Zone for\n the network interface.

    \n
  • \n
  • \n

    \n network-interface.deny-all-igw-traffic - A Boolean that indicates whether \n a network interface with an IPv6 address is unreachable from the public internet.

    \n
  • \n
  • \n

    \n network-interface.description - The description of the network\n interface.

    \n
  • \n
  • \n

    \n network-interface.group-id - The ID of a security group\n associated with the network interface.

    \n
  • \n
  • \n

    \n network-interface.group-name - The name of a security group\n associated with the network interface.

    \n
  • \n
  • \n

    \n network-interface.ipv4-prefixes.ipv4-prefix - The IPv4 prefixes that are assigned to the network interface.

    \n
  • \n
  • \n

    \n network-interface.ipv6-address - The IPv6 address associated with the network interface.

    \n
  • \n
  • \n

    \n network-interface.ipv6-addresses.ipv6-address - The IPv6 address\n associated with the network interface.

    \n
  • \n
  • \n

    \n network-interface.ipv6-addresses.is-primary-ipv6 - A Boolean that indicates whether this\n is the primary IPv6 address.

    \n
  • \n
  • \n

    \n network-interface.ipv6-native - A Boolean that indicates whether this is\n an IPv6 only network interface.

    \n
  • \n
  • \n

    \n network-interface.ipv6-prefixes.ipv6-prefix - The IPv6 prefix assigned to the network interface.

    \n
  • \n
  • \n

    \n network-interface.mac-address - The MAC address of the network\n interface.

    \n
  • \n
  • \n

    \n network-interface.network-interface-id - The ID of the network\n interface.

    \n
  • \n
  • \n

    \n network-interface.operator.managed - A Boolean that indicates\n whether the instance has a managed network interface.

    \n
  • \n
  • \n

    \n network-interface.operator.principal - The principal that manages\n the network interface. Only valid for instances with managed network interfaces,\n where managed is true.

    \n
  • \n
  • \n

    \n network-interface.outpost-arn - The ARN of the Outpost.

    \n
  • \n
  • \n

    \n network-interface.owner-id - The ID of the owner of the network\n interface.

    \n
  • \n
  • \n

    \n network-interface.private-dns-name - The private DNS name of the\n network interface.

    \n
  • \n
  • \n

    \n network-interface.private-ip-address - The private IPv4 address.

    \n
  • \n
  • \n

    \n network-interface.public-dns-name - The public DNS name.

    \n
  • \n
  • \n

    \n network-interface.requester-id - The requester ID for the network\n interface.

    \n
  • \n
  • \n

    \n network-interface.requester-managed - Indicates whether the\n network interface is being managed by Amazon Web Services.

    \n
  • \n
  • \n

    \n network-interface.status - The status of the network interface\n (available) | in-use).

    \n
  • \n
  • \n

    \n network-interface.source-dest-check - Whether the network\n interface performs source/destination checking. A value of true\n means that checking is enabled, and false means that checking is\n disabled. The value must be false for the network interface to\n perform network address translation (NAT) in your VPC.

    \n
  • \n
  • \n

    \n network-interface.subnet-id - The ID of the subnet for the\n network interface.

    \n
  • \n
  • \n

    \n network-interface.tag-key - The key of a tag assigned to the network interface.

    \n
  • \n
  • \n

    \n network-interface.tag-value - The value of a tag assigned to the network interface.

    \n
  • \n
  • \n

    \n network-interface.vpc-id - The ID of the VPC for the network\n interface.

    \n
  • \n
  • \n

    \n operator.managed - A Boolean that indicates whether this is a\n managed instance.

    \n
  • \n
  • \n

    \n operator.principal - The principal that manages the instance.\n Only valid for managed instances, where managed is\n true.

    \n
  • \n
  • \n

    \n outpost-arn - The Amazon Resource Name (ARN) of the\n Outpost.

    \n
  • \n
  • \n

    \n owner-id - The Amazon Web Services account ID of the instance\n owner.

    \n
  • \n
  • \n

    \n placement-group-name - The name of the placement group for the\n instance.

    \n
  • \n
  • \n

    \n placement-partition-number - The partition in which the instance is\n located.

    \n
  • \n
  • \n

    \n platform - The platform. To list only Windows instances, use\n windows.

    \n
  • \n
  • \n

    \n platform-details - The platform (Linux/UNIX |\n Red Hat BYOL Linux | Red Hat Enterprise Linux |\n Red Hat Enterprise Linux with HA | Red Hat Enterprise\n Linux with SQL Server Standard and HA | Red Hat Enterprise\n Linux with SQL Server Enterprise and HA | Red Hat Enterprise\n Linux with SQL Server Standard | Red Hat Enterprise Linux with\n SQL Server Web | Red Hat Enterprise Linux with SQL Server\n Enterprise | SQL Server Enterprise | SQL Server\n Standard | SQL Server Web | SUSE Linux |\n Ubuntu Pro | Windows | Windows BYOL |\n Windows with SQL Server Enterprise | Windows with SQL\n Server Standard | Windows with SQL Server Web).

    \n
  • \n
  • \n

    \n private-dns-name - The private IPv4 DNS name of the\n instance.

    \n
  • \n
  • \n

    \n private-dns-name-options.enable-resource-name-dns-a-record - A\n Boolean that indicates whether to respond to DNS queries for instance hostnames\n with DNS A records.

    \n
  • \n
  • \n

    \n private-dns-name-options.enable-resource-name-dns-aaaa-record - A\n Boolean that indicates whether to respond to DNS queries for instance hostnames\n with DNS AAAA records.

    \n
  • \n
  • \n

    \n private-dns-name-options.hostname-type - The type of hostname\n (ip-name | resource-name).

    \n
  • \n
  • \n

    \n private-ip-address - The private IPv4 address of the instance.\n This can only be used to filter by the primary IP address of the network\n interface attached to the instance. To filter by additional IP addresses\n assigned to the network interface, use the filter\n network-interface.addresses.private-ip-address.

    \n
  • \n
  • \n

    \n product-code - The product code associated with the AMI used to\n launch the instance.

    \n
  • \n
  • \n

    \n product-code.type - The type of product code (devpay\n | marketplace).

    \n
  • \n
  • \n

    \n ramdisk-id - The RAM disk ID.

    \n
  • \n
  • \n

    \n reason - The reason for the current state of the instance (for\n example, shows \"User Initiated [date]\" when you stop or terminate the instance).\n Similar to the state-reason-code filter.

    \n
  • \n
  • \n

    \n requester-id - The ID of the entity that launched the instance on\n your behalf (for example, Amazon Web Services Management Console, Auto Scaling, and so\n on).

    \n
  • \n
  • \n

    \n reservation-id - The ID of the instance's reservation. A\n reservation ID is created any time you launch an instance. A reservation ID has\n a one-to-one relationship with an instance launch request, but can be associated\n with more than one instance if you launch multiple instances using the same\n launch request. For example, if you launch one instance, you get one reservation\n ID. If you launch ten instances using the same launch request, you also get one\n reservation ID.

    \n
  • \n
  • \n

    \n root-device-name - The device name of the root device volume (for\n example, /dev/sda1).

    \n
  • \n
  • \n

    \n root-device-type - The type of the root device volume\n (ebs | instance-store).

    \n
  • \n
  • \n

    \n source-dest-check - Indicates whether the instance performs\n source/destination checking. A value of true means that checking is\n enabled, and false means that checking is disabled. The value must\n be false for the instance to perform network address translation\n (NAT) in your VPC.

    \n
  • \n
  • \n

    \n spot-instance-request-id - The ID of the Spot Instance\n request.

    \n
  • \n
  • \n

    \n state-reason-code - The reason code for the state change.

    \n
  • \n
  • \n

    \n state-reason-message - A message that describes the state\n change.

    \n
  • \n
  • \n

    \n subnet-id - The ID of the subnet for the instance.

    \n
  • \n
  • \n

    \n tag: - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value.\n For example, to find all resources that have a tag with the key Owner and the value TeamA, specify tag:Owner for the filter name and TeamA for the filter value.

    \n
  • \n
  • \n

    \n tag-key - The key of a tag assigned to the resource. Use this filter to find all resources that have a tag with a specific key, regardless of the tag value.

    \n
  • \n
  • \n

    \n tenancy - The tenancy of an instance (dedicated |\n default | host).

    \n
  • \n
  • \n

    \n tpm-support - Indicates if the instance is configured for\n NitroTPM support (v2.0).

    \n
  • \n
  • \n

    \n usage-operation - The usage operation value for the instance\n (RunInstances | RunInstances:00g0 |\n RunInstances:0010 | RunInstances:1010 |\n RunInstances:1014 | RunInstances:1110 |\n RunInstances:0014 | RunInstances:0210 |\n RunInstances:0110 | RunInstances:0100 |\n RunInstances:0004 | RunInstances:0200 |\n RunInstances:000g | RunInstances:0g00 |\n RunInstances:0002 | RunInstances:0800 |\n RunInstances:0102 | RunInstances:0006 |\n RunInstances:0202).

    \n
  • \n
  • \n

    \n usage-operation-update-time - The time that the usage operation\n was last updated, for example, 2022-09-15T17:15:20.000Z.

    \n
  • \n
  • \n

    \n virtualization-type - The virtualization type of the instance\n (paravirtual | hvm).

    \n
  • \n
  • \n

    \n vpc-id - The ID of the VPC that the instance is running in.

    \n
  • \n
", + "smithy.api#documentation": "

The filters.

\n
    \n
  • \n

    \n affinity - The affinity setting for an instance running on a\n Dedicated Host (default | host).

    \n
  • \n
  • \n

    \n architecture - The instance architecture (i386 |\n x86_64 | arm64).

    \n
  • \n
  • \n

    \n availability-zone - The Availability Zone of the instance.

    \n
  • \n
  • \n

    \n block-device-mapping.attach-time - The attach time for an EBS\n volume mapped to the instance, for example,\n 2022-09-15T17:15:20.000Z.

    \n
  • \n
  • \n

    \n block-device-mapping.delete-on-termination - A Boolean that\n indicates whether the EBS volume is deleted on instance termination.

    \n
  • \n
  • \n

    \n block-device-mapping.device-name - The device name specified in\n the block device mapping (for example, /dev/sdh or\n xvdh).

    \n
  • \n
  • \n

    \n block-device-mapping.status - The status for the EBS volume\n (attaching | attached | detaching |\n detached).

    \n
  • \n
  • \n

    \n block-device-mapping.volume-id - The volume ID of the EBS\n volume.

    \n
  • \n
  • \n

    \n boot-mode - The boot mode that was specified by the AMI\n (legacy-bios | uefi |\n uefi-preferred).

    \n
  • \n
  • \n

    \n capacity-reservation-id - The ID of the Capacity Reservation into which the\n instance was launched.

    \n
  • \n
  • \n

    \n capacity-reservation-specification.capacity-reservation-preference\n - The instance's Capacity Reservation preference (open | none).

    \n
  • \n
  • \n

    \n capacity-reservation-specification.capacity-reservation-target.capacity-reservation-id\n - The ID of the targeted Capacity Reservation.

    \n
  • \n
  • \n

    \n capacity-reservation-specification.capacity-reservation-target.capacity-reservation-resource-group-arn\n - The ARN of the targeted Capacity Reservation group.

    \n
  • \n
  • \n

    \n client-token - The idempotency token you provided when you\n launched the instance.

    \n
  • \n
  • \n

    \n current-instance-boot-mode - The boot mode that is used to launch\n the instance at launch or start (legacy-bios |\n uefi).

    \n
  • \n
  • \n

    \n dns-name - The public DNS name of the instance.

    \n
  • \n
  • \n

    \n ebs-optimized - A Boolean that indicates whether the instance is\n optimized for Amazon EBS I/O.

    \n
  • \n
  • \n

    \n ena-support - A Boolean that indicates whether the instance is\n enabled for enhanced networking with ENA.

    \n
  • \n
  • \n

    \n enclave-options.enabled - A Boolean that indicates whether the\n instance is enabled for Amazon Web Services Nitro Enclaves.

    \n
  • \n
  • \n

    \n hibernation-options.configured - A Boolean that indicates whether\n the instance is enabled for hibernation. A value of true means that\n the instance is enabled for hibernation.

    \n
  • \n
  • \n

    \n host-id - The ID of the Dedicated Host on which the instance is\n running, if applicable.

    \n
  • \n
  • \n

    \n hypervisor - The hypervisor type of the instance\n (ovm | xen). The value xen is used\n for both Xen and Nitro hypervisors.

    \n
  • \n
  • \n

    \n iam-instance-profile.arn - The instance profile associated with\n the instance. Specified as an ARN.

    \n
  • \n
  • \n

    \n iam-instance-profile.id - The instance profile associated with\n the instance. Specified as an ID.

    \n
  • \n
  • \n

    \n image-id - The ID of the image used to launch the\n instance.

    \n
  • \n
  • \n

    \n instance-id - The ID of the instance.

    \n
  • \n
  • \n

    \n instance-lifecycle - Indicates whether this is a Spot Instance, a Scheduled Instance, or\n a Capacity Block (spot | scheduled | capacity-block).

    \n
  • \n
  • \n

    \n instance-state-code - The state of the instance, as a 16-bit\n unsigned integer. The high byte is used for internal purposes and should be\n ignored. The low byte is set based on the state represented. The valid values\n are: 0 (pending), 16 (running), 32 (shutting-down), 48 (terminated), 64\n (stopping), and 80 (stopped).

    \n
  • \n
  • \n

    \n instance-state-name - The state of the instance\n (pending | running | shutting-down |\n terminated | stopping |\n stopped).

    \n
  • \n
  • \n

    \n instance-type - The type of instance (for example,\n t2.micro).

    \n
  • \n
  • \n

    \n instance.group-id - The ID of the security group for the\n instance.

    \n
  • \n
  • \n

    \n instance.group-name - The name of the security group for the\n instance.

    \n
  • \n
  • \n

    \n ip-address - The public IPv4 address of the instance.

    \n
  • \n
  • \n

    \n ipv6-address - The IPv6 address of the instance.

    \n
  • \n
  • \n

    \n kernel-id - The kernel ID.

    \n
  • \n
  • \n

    \n key-name - The name of the key pair used when the instance was\n launched.

    \n
  • \n
  • \n

    \n launch-index - When launching multiple instances, this is the\n index for the instance in the launch group (for example, 0, 1, 2, and so on).\n

    \n
  • \n
  • \n

    \n launch-time - The time when the instance was launched, in the ISO\n 8601 format in the UTC time zone (YYYY-MM-DDThh:mm:ss.sssZ), for example,\n 2021-09-29T11:04:43.305Z. You can use a wildcard\n (*), for example, 2021-09-29T*, which matches an\n entire day.

    \n
  • \n
  • \n

    \n maintenance-options.auto-recovery - The current automatic\n recovery behavior of the instance (disabled | default).

    \n
  • \n
  • \n

    \n metadata-options.http-endpoint - The status of access to the HTTP\n metadata endpoint on your instance (enabled |\n disabled).

    \n
  • \n
  • \n

    \n metadata-options.http-protocol-ipv4 - Indicates whether the IPv4\n endpoint is enabled (disabled | enabled).

    \n
  • \n
  • \n

    \n metadata-options.http-protocol-ipv6 - Indicates whether the IPv6\n endpoint is enabled (disabled | enabled).

    \n
  • \n
  • \n

    \n metadata-options.http-put-response-hop-limit - The HTTP metadata\n request put response hop limit (integer, possible values 1 to\n 64).

    \n
  • \n
  • \n

    \n metadata-options.http-tokens - The metadata request authorization\n state (optional | required).

    \n
  • \n
  • \n

    \n metadata-options.instance-metadata-tags - The status of access to\n instance tags from the instance metadata (enabled |\n disabled).

    \n
  • \n
  • \n

    \n metadata-options.state - The state of the metadata option changes\n (pending | applied).

    \n
  • \n
  • \n

    \n monitoring-state - Indicates whether detailed monitoring is\n enabled (disabled | enabled).

    \n
  • \n
  • \n

    \n network-interface.addresses.association.allocation-id - The allocation ID.

    \n
  • \n
  • \n

    \n network-interface.addresses.association.association-id - The association ID.

    \n
  • \n
  • \n

    \n network-interface.addresses.association.carrier-ip - The carrier IP address.

    \n
  • \n
  • \n

    \n network-interface.addresses.association.customer-owned-ip - The customer-owned IP address.

    \n
  • \n
  • \n

    \n network-interface.addresses.association.ip-owner-id - The owner\n ID of the private IPv4 address associated with the network interface.

    \n
  • \n
  • \n

    \n network-interface.addresses.association.public-dns-name - The public DNS name.

    \n
  • \n
  • \n

    \n network-interface.addresses.association.public-ip - The ID of the\n association of an Elastic IP address (IPv4) with a network interface.

    \n
  • \n
  • \n

    \n network-interface.addresses.primary - Specifies whether the IPv4\n address of the network interface is the primary private IPv4 address.

    \n
  • \n
  • \n

    \n network-interface.addresses.private-dns-name - The private DNS name.

    \n
  • \n
  • \n

    \n network-interface.addresses.private-ip-address - The private IPv4\n address associated with the network interface.

    \n
  • \n
  • \n

    \n network-interface.association.allocation-id - The allocation ID\n returned when you allocated the Elastic IP address (IPv4) for your network\n interface.

    \n
  • \n
  • \n

    \n network-interface.association.association-id - The association ID\n returned when the network interface was associated with an IPv4 address.

    \n
  • \n
  • \n

    \n network-interface.association.carrier-ip - The carrier IP address.

    \n
  • \n
  • \n

    \n network-interface.association.customer-owned-ip - The customer-owned IP address.

    \n
  • \n
  • \n

    \n network-interface.association.ip-owner-id - The owner of the\n Elastic IP address (IPv4) associated with the network interface.

    \n
  • \n
  • \n

    \n network-interface.association.public-dns-name - The public DNS name.

    \n
  • \n
  • \n

    \n network-interface.association.public-ip - The address of the\n Elastic IP address (IPv4) bound to the network interface.

    \n
  • \n
  • \n

    \n network-interface.attachment.attach-time - The time that the\n network interface was attached to an instance.

    \n
  • \n
  • \n

    \n network-interface.attachment.attachment-id - The ID of the\n interface attachment.

    \n
  • \n
  • \n

    \n network-interface.attachment.delete-on-termination - Specifies\n whether the attachment is deleted when an instance is terminated.

    \n
  • \n
  • \n

    \n network-interface.attachment.device-index - The device index to\n which the network interface is attached.

    \n
  • \n
  • \n

    \n network-interface.attachment.instance-id - The ID of the instance\n to which the network interface is attached.

    \n
  • \n
  • \n

    \n network-interface.attachment.instance-owner-id - The owner ID of\n the instance to which the network interface is attached.

    \n
  • \n
  • \n

    \n network-interface.attachment.network-card-index - The index of the network card.

    \n
  • \n
  • \n

    \n network-interface.attachment.status - The status of the\n attachment (attaching | attached |\n detaching | detached).

    \n
  • \n
  • \n

    \n network-interface.availability-zone - The Availability Zone for\n the network interface.

    \n
  • \n
  • \n

    \n network-interface.deny-all-igw-traffic - A Boolean that indicates whether \n a network interface with an IPv6 address is unreachable from the public internet.

    \n
  • \n
  • \n

    \n network-interface.description - The description of the network\n interface.

    \n
  • \n
  • \n

    \n network-interface.group-id - The ID of a security group\n associated with the network interface.

    \n
  • \n
  • \n

    \n network-interface.group-name - The name of a security group\n associated with the network interface.

    \n
  • \n
  • \n

    \n network-interface.ipv4-prefixes.ipv4-prefix - The IPv4 prefixes that are assigned to the network interface.

    \n
  • \n
  • \n

    \n network-interface.ipv6-address - The IPv6 address associated with the network interface.

    \n
  • \n
  • \n

    \n network-interface.ipv6-addresses.ipv6-address - The IPv6 address\n associated with the network interface.

    \n
  • \n
  • \n

    \n network-interface.ipv6-addresses.is-primary-ipv6 - A Boolean that indicates whether this\n is the primary IPv6 address.

    \n
  • \n
  • \n

    \n network-interface.ipv6-native - A Boolean that indicates whether this is\n an IPv6 only network interface.

    \n
  • \n
  • \n

    \n network-interface.ipv6-prefixes.ipv6-prefix - The IPv6 prefix assigned to the network interface.

    \n
  • \n
  • \n

    \n network-interface.mac-address - The MAC address of the network\n interface.

    \n
  • \n
  • \n

    \n network-interface.network-interface-id - The ID of the network\n interface.

    \n
  • \n
  • \n

    \n network-interface.operator.managed - A Boolean that indicates\n whether the instance has a managed network interface.

    \n
  • \n
  • \n

    \n network-interface.operator.principal - The principal that manages\n the network interface. Only valid for instances with managed network interfaces,\n where managed is true.

    \n
  • \n
  • \n

    \n network-interface.outpost-arn - The ARN of the Outpost.

    \n
  • \n
  • \n

    \n network-interface.owner-id - The ID of the owner of the network\n interface.

    \n
  • \n
  • \n

    \n network-interface.private-dns-name - The private DNS name of the\n network interface.

    \n
  • \n
  • \n

    \n network-interface.private-ip-address - The private IPv4 address.

    \n
  • \n
  • \n

    \n network-interface.public-dns-name - The public DNS name.

    \n
  • \n
  • \n

    \n network-interface.requester-id - The requester ID for the network\n interface.

    \n
  • \n
  • \n

    \n network-interface.requester-managed - Indicates whether the\n network interface is being managed by Amazon Web Services.

    \n
  • \n
  • \n

    \n network-interface.status - The status of the network interface\n (available | in-use).

    \n
  • \n
  • \n

    \n network-interface.source-dest-check - Whether the network\n interface performs source/destination checking. A value of true\n means that checking is enabled, and false means that checking is\n disabled. The value must be false for the network interface to\n perform network address translation (NAT) in your VPC.

    \n
  • \n
  • \n

    \n network-interface.subnet-id - The ID of the subnet for the\n network interface.

    \n
  • \n
  • \n

    \n network-interface.tag-key - The key of a tag assigned to the network interface.

    \n
  • \n
  • \n

    \n network-interface.tag-value - The value of a tag assigned to the network interface.

    \n
  • \n
  • \n

    \n network-interface.vpc-id - The ID of the VPC for the network\n interface.

    \n
  • \n
  • \n

    \n network-performance-options.bandwidth-weighting - Where the performance boost \n \t\t\tis applied, if applicable. Valid values: default, vpc-1, \n \t\t\tebs-1.

    \n
  • \n
  • \n

    \n operator.managed - A Boolean that indicates whether this is a\n managed instance.

    \n
  • \n
  • \n

    \n operator.principal - The principal that manages the instance.\n Only valid for managed instances, where managed is\n true.

    \n
  • \n
  • \n

    \n outpost-arn - The Amazon Resource Name (ARN) of the\n Outpost.

    \n
  • \n
  • \n

    \n owner-id - The Amazon Web Services account ID of the instance\n owner.

    \n
  • \n
  • \n

    \n placement-group-name - The name of the placement group for the\n instance.

    \n
  • \n
  • \n

    \n placement-partition-number - The partition in which the instance is\n located.

    \n
  • \n
  • \n

    \n platform - The platform. To list only Windows instances, use\n windows.

    \n
  • \n
  • \n

    \n platform-details - The platform (Linux/UNIX |\n Red Hat BYOL Linux | Red Hat Enterprise Linux |\n Red Hat Enterprise Linux with HA | Red Hat Enterprise\n Linux with SQL Server Standard and HA | Red Hat Enterprise\n Linux with SQL Server Enterprise and HA | Red Hat Enterprise\n Linux with SQL Server Standard | Red Hat Enterprise Linux with\n SQL Server Web | Red Hat Enterprise Linux with SQL Server\n Enterprise | SQL Server Enterprise | SQL Server\n Standard | SQL Server Web | SUSE Linux |\n Ubuntu Pro | Windows | Windows BYOL |\n Windows with SQL Server Enterprise | Windows with SQL\n Server Standard | Windows with SQL Server Web).

    \n
  • \n
  • \n

    \n private-dns-name - The private IPv4 DNS name of the\n instance.

    \n
  • \n
  • \n

    \n private-dns-name-options.enable-resource-name-dns-a-record - A\n Boolean that indicates whether to respond to DNS queries for instance hostnames\n with DNS A records.

    \n
  • \n
  • \n

    \n private-dns-name-options.enable-resource-name-dns-aaaa-record - A\n Boolean that indicates whether to respond to DNS queries for instance hostnames\n with DNS AAAA records.

    \n
  • \n
  • \n

    \n private-dns-name-options.hostname-type - The type of hostname\n (ip-name | resource-name).

    \n
  • \n
  • \n

    \n private-ip-address - The private IPv4 address of the instance.\n This can only be used to filter by the primary IP address of the network\n interface attached to the instance. To filter by additional IP addresses\n assigned to the network interface, use the filter\n network-interface.addresses.private-ip-address.

    \n
  • \n
  • \n

    \n product-code - The product code associated with the AMI used to\n launch the instance.

    \n
  • \n
  • \n

    \n product-code.type - The type of product code (devpay\n | marketplace).

    \n
  • \n
  • \n

    \n ramdisk-id - The RAM disk ID.

    \n
  • \n
  • \n

    \n reason - The reason for the current state of the instance (for\n example, shows \"User Initiated [date]\" when you stop or terminate the instance).\n Similar to the state-reason-code filter.

    \n
  • \n
  • \n

    \n requester-id - The ID of the entity that launched the instance on\n your behalf (for example, Amazon Web Services Management Console, Auto Scaling, and so\n on).

    \n
  • \n
  • \n

    \n reservation-id - The ID of the instance's reservation. A\n reservation ID is created any time you launch an instance. A reservation ID has\n a one-to-one relationship with an instance launch request, but can be associated\n with more than one instance if you launch multiple instances using the same\n launch request. For example, if you launch one instance, you get one reservation\n ID. If you launch ten instances using the same launch request, you also get one\n reservation ID.

    \n
  • \n
  • \n

    \n root-device-name - The device name of the root device volume (for\n example, /dev/sda1).

    \n
  • \n
  • \n

    \n root-device-type - The type of the root device volume\n (ebs | instance-store).

    \n
  • \n
  • \n

    \n source-dest-check - Indicates whether the instance performs\n source/destination checking. A value of true means that checking is\n enabled, and false means that checking is disabled. The value must\n be false for the instance to perform network address translation\n (NAT) in your VPC.

    \n
  • \n
  • \n

    \n spot-instance-request-id - The ID of the Spot Instance\n request.

    \n
  • \n
  • \n

    \n state-reason-code - The reason code for the state change.

    \n
  • \n
  • \n

    \n state-reason-message - A message that describes the state\n change.

    \n
  • \n
  • \n

    \n subnet-id - The ID of the subnet for the instance.

    \n
  • \n
  • \n

    \n tag: - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value.\n For example, to find all resources that have a tag with the key Owner and the value TeamA, specify tag:Owner for the filter name and TeamA for the filter value.

    \n
  • \n
  • \n

    \n tag-key - The key of a tag assigned to the resource. Use this filter to find all resources that have a tag with a specific key, regardless of the tag value.

    \n
  • \n
  • \n

    \n tenancy - The tenancy of an instance (dedicated |\n default | host).

    \n
  • \n
  • \n

    \n tpm-support - Indicates if the instance is configured for\n NitroTPM support (v2.0).

    \n
  • \n
  • \n

    \n usage-operation - The usage operation value for the instance\n (RunInstances | RunInstances:00g0 |\n RunInstances:0010 | RunInstances:1010 |\n RunInstances:1014 | RunInstances:1110 |\n RunInstances:0014 | RunInstances:0210 |\n RunInstances:0110 | RunInstances:0100 |\n RunInstances:0004 | RunInstances:0200 |\n RunInstances:000g | RunInstances:0g00 |\n RunInstances:0002 | RunInstances:0800 |\n RunInstances:0102 | RunInstances:0006 |\n RunInstances:0202).

    \n
  • \n
  • \n

    \n usage-operation-update-time - The time that the usage operation\n was last updated, for example, 2022-09-15T17:15:20.000Z.

    \n
  • \n
  • \n

    \n virtualization-type - The virtualization type of the instance\n (paravirtual | hvm).

    \n
  • \n
  • \n

    \n vpc-id - The ID of the VPC that the instance is running in.

    \n
  • \n
", "smithy.api#xmlName": "Filter" } }, @@ -37815,7 +37886,7 @@ "target": "com.amazonaws.ec2#FilterList", "traits": { "aws.protocols#ec2QueryName": "Filter", - "smithy.api#documentation": "

One or more filters.

\n
    \n
  • \n

    \n association.allocation-id - The allocation ID returned when you\n\t\t allocated the Elastic IP address (IPv4) for your network interface.

    \n
  • \n
  • \n

    \n association.association-id - The association ID returned when the\n\t\t network interface was associated with an IPv4 address.

    \n
  • \n
  • \n

    \n addresses.association.owner-id - The owner ID of the addresses associated with the network interface.

    \n
  • \n
  • \n

    \n addresses.association.public-ip - The association ID returned when\n\t\t the network interface was associated with the Elastic IP address\n\t\t (IPv4).

    \n
  • \n
  • \n

    \n addresses.primary - Whether the private IPv4 address is the primary\n IP address associated with the network interface.

    \n
  • \n
  • \n

    \n addresses.private-ip-address - The private IPv4 addresses\n\t\t associated with the network interface.

    \n
  • \n
  • \n

    \n association.ip-owner-id - The owner of the Elastic IP address\n (IPv4) associated with the network interface.

    \n
  • \n
  • \n

    \n association.public-ip - The address of the Elastic IP address\n (IPv4) bound to the network interface.

    \n
  • \n
  • \n

    \n association.public-dns-name - The public DNS name for the network\n interface (IPv4).

    \n
  • \n
  • \n

    \n attachment.attach-time - The time that the network interface was attached to an instance.

    \n
  • \n
  • \n

    \n attachment.attachment-id - The ID of the interface attachment.

    \n
  • \n
  • \n

    \n attachment.delete-on-termination - Indicates whether the attachment is deleted when an instance is terminated.

    \n
  • \n
  • \n

    \n attachment.device-index - The device index to which the network interface is attached.

    \n
  • \n
  • \n

    \n attachment.instance-id - The ID of the instance to which the network interface is attached.

    \n
  • \n
  • \n

    \n attachment.instance-owner-id - The owner ID of the instance to which the network interface is attached.

    \n
  • \n
  • \n

    \n attachment.status - The status of the attachment (attaching | attached | detaching | detached).

    \n
  • \n
  • \n

    \n availability-zone - The Availability Zone of the network interface.

    \n
  • \n
  • \n

    \n description - The description of the network interface.

    \n
  • \n
  • \n

    \n group-id - The ID of a security group associated with the network interface.

    \n
  • \n
  • \n

    \n ipv6-addresses.ipv6-address - An IPv6 address associated with\n the network interface.

    \n
  • \n
  • \n

    \n interface-type - The type of network interface (api_gateway_managed | \n\t\t aws_codestar_connections_managed | branch | ec2_instance_connect_endpoint | \n\t\t efa | efa-only | efs | gateway_load_balancer | \n\t\t gateway_load_balancer_endpoint | global_accelerator_managed | interface | \n\t\t iot_rules_managed | lambda | load_balancer | nat_gateway | \n\t\t network_load_balancer | quicksight | transit_gateway | trunk | \n\t\t vpc_endpoint).

    \n
  • \n
  • \n

    \n mac-address - The MAC address of the network interface.

    \n
  • \n
  • \n

    \n network-interface-id - The ID of the network interface.

    \n
  • \n
  • \n

    \n operator.managed - A Boolean that indicates whether this is a\n managed network interface.

    \n
  • \n
  • \n

    \n operator.principal - The principal that manages the network\n interface. Only valid for managed network interfaces, where managed\n is true.

    \n
  • \n
  • \n

    \n owner-id - The Amazon Web Services account ID of the network interface owner.

    \n
  • \n
  • \n

    \n private-dns-name - The private DNS name of the network interface (IPv4).

    \n
  • \n
  • \n

    \n private-ip-address - The private IPv4 address or addresses of the\n network interface.

    \n
  • \n
  • \n

    \n requester-id - The alias or Amazon Web Services account ID of the principal or service that created the network interface.

    \n
  • \n
  • \n

    \n requester-managed - Indicates whether the network interface is being managed by an Amazon Web Services \n\t\t service (for example, Amazon Web Services Management Console, Auto Scaling, and so on).

    \n
  • \n
  • \n

    \n source-dest-check - Indicates whether the network interface performs source/destination checking. \n\t\t A value of true means checking is enabled, and false means checking is disabled. \n\t\t The value must be false for the network interface to perform network address translation (NAT) in your VPC.

    \n
  • \n
  • \n

    \n status - The status of the network interface. If the network interface is not attached to an instance, the status is available; \n\t\t if a network interface is attached to an instance the status is in-use.

    \n
  • \n
  • \n

    \n subnet-id - The ID of the subnet for the network interface.

    \n
  • \n
  • \n

    \n tag: - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value.\n For example, to find all resources that have a tag with the key Owner and the value TeamA, specify tag:Owner for the filter name and TeamA for the filter value.

    \n
  • \n
  • \n

    \n tag-key - The key of a tag assigned to the resource. Use this filter to find all resources assigned a tag with a specific key, regardless of the tag value.

    \n
  • \n
  • \n

    \n vpc-id - The ID of the VPC for the network interface.

    \n
  • \n
", + "smithy.api#documentation": "

One or more filters.

\n
    \n
  • \n

    \n association.allocation-id - The allocation ID returned when you\n\t\t allocated the Elastic IP address (IPv4) for your network interface.

    \n
  • \n
  • \n

    \n association.association-id - The association ID returned when the\n\t\t network interface was associated with an IPv4 address.

    \n
  • \n
  • \n

    \n addresses.association.owner-id - The owner ID of the addresses associated with the network interface.

    \n
  • \n
  • \n

    \n addresses.association.public-ip - The association ID returned when\n\t\t the network interface was associated with the Elastic IP address\n\t\t (IPv4).

    \n
  • \n
  • \n

    \n addresses.primary - Whether the private IPv4 address is the primary\n IP address associated with the network interface.

    \n
  • \n
  • \n

    \n addresses.private-ip-address - The private IPv4 addresses\n\t\t associated with the network interface.

    \n
  • \n
  • \n

    \n association.ip-owner-id - The owner of the Elastic IP address\n (IPv4) associated with the network interface.

    \n
  • \n
  • \n

    \n association.public-ip - The address of the Elastic IP address\n (IPv4) bound to the network interface.

    \n
  • \n
  • \n

    \n association.public-dns-name - The public DNS name for the network\n interface (IPv4).

    \n
  • \n
  • \n

    \n attachment.attach-time - The time that the network interface was attached to an instance.

    \n
  • \n
  • \n

    \n attachment.attachment-id - The ID of the interface attachment.

    \n
  • \n
  • \n

    \n attachment.delete-on-termination - Indicates whether the attachment is deleted when an instance is terminated.

    \n
  • \n
  • \n

    \n attachment.device-index - The device index to which the network interface is attached.

    \n
  • \n
  • \n

    \n attachment.instance-id - The ID of the instance to which the network interface is attached.

    \n
  • \n
  • \n

    \n attachment.instance-owner-id - The owner ID of the instance to which the network interface is attached.

    \n
  • \n
  • \n

    \n attachment.status - The status of the attachment (attaching | attached | detaching | detached).

    \n
  • \n
  • \n

    \n availability-zone - The Availability Zone of the network interface.

    \n
  • \n
  • \n

    \n description - The description of the network interface.

    \n
  • \n
  • \n

    \n group-id - The ID of a security group associated with the network interface.

    \n
  • \n
  • \n

    \n ipv6-addresses.ipv6-address - An IPv6 address associated with\n the network interface.

    \n
  • \n
  • \n

    \n interface-type - The type of network interface (api_gateway_managed | \n\t\t aws_codestar_connections_managed | branch | ec2_instance_connect_endpoint | \n\t\t efa | efa-only | efs | gateway_load_balancer | \n\t\t gateway_load_balancer_endpoint | global_accelerator_managed | interface | \n\t\t iot_rules_managed | lambda | load_balancer | nat_gateway | \n\t\t network_load_balancer | quicksight | transit_gateway | trunk | \n\t\t vpc_endpoint).

    \n
  • \n
  • \n

    \n mac-address - The MAC address of the network interface.

    \n
  • \n
  • \n

    \n network-interface-id - The ID of the network interface.

    \n
  • \n
  • \n

    \n owner-id - The Amazon Web Services account ID of the network interface owner.

    \n
  • \n
  • \n

    \n private-dns-name - The private DNS name of the network interface (IPv4).

    \n
  • \n
  • \n

    \n private-ip-address - The private IPv4 address or addresses of the\n network interface.

    \n
  • \n
  • \n

    \n requester-id - The alias or Amazon Web Services account ID of the principal or service that created the network interface.

    \n
  • \n
  • \n

    \n requester-managed - Indicates whether the network interface is being managed by an Amazon Web Services \n\t\t service (for example, Amazon Web Services Management Console, Auto Scaling, and so on).

    \n
  • \n
  • \n

    \n source-dest-check - Indicates whether the network interface performs source/destination checking. \n\t\t A value of true means checking is enabled, and false means checking is disabled. \n\t\t The value must be false for the network interface to perform network address translation (NAT) in your VPC.

    \n
  • \n
  • \n

    \n status - The status of the network interface. If the network interface is not attached to an instance, the status is available; \n\t\t if a network interface is attached to an instance the status is in-use.

    \n
  • \n
  • \n

    \n subnet-id - The ID of the subnet for the network interface.

    \n
  • \n
  • \n

    \n tag: - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value.\n For example, to find all resources that have a tag with the key Owner and the value TeamA, specify tag:Owner for the filter name and TeamA for the filter value.

    \n
  • \n
  • \n

    \n tag-key - The key of a tag assigned to the resource. Use this filter to find all resources assigned a tag with a specific key, regardless of the tag value.

    \n
  • \n
  • \n

    \n vpc-id - The ID of the VPC for the network interface.

    \n
  • \n
", "smithy.api#xmlName": "filter" } } @@ -52219,7 +52290,7 @@ } }, "traits": { - "smithy.api#documentation": "

A filter name and value pair that is used to return a more specific list of results from a describe operation. \n Filters can be used to match a set of resources by specific criteria, such as tags, attributes, or IDs.

\n

If you specify multiple filters, the filters are joined with an AND, and the request returns only \n results that match all of the specified filters.

" + "smithy.api#documentation": "

A filter name and value pair that is used to return a more specific list of results from a describe operation. \n Filters can be used to match a set of resources by specific criteria, such as tags, attributes, or IDs.

\n

If you specify multiple filters, the filters are joined with an AND, and the request returns only \n results that match all of the specified filters.

\n

For more information, see List and filter using the CLI and API in the Amazon EC2 User Guide.

" } }, "com.amazonaws.ec2#FilterList": { @@ -61756,6 +61827,14 @@ "smithy.api#xmlName": "currentInstanceBootMode" } }, + "NetworkPerformanceOptions": { + "target": "com.amazonaws.ec2#InstanceNetworkPerformanceOptions", + "traits": { + "aws.protocols#ec2QueryName": "NetworkPerformanceOptions", + "smithy.api#documentation": "

Contains settings for the network performance options for your instance.

", + "smithy.api#xmlName": "networkPerformanceOptions" + } + }, "Operator": { "target": "com.amazonaws.ec2#OperatorResponse", "traits": { @@ -62231,6 +62310,29 @@ } } }, + "com.amazonaws.ec2#InstanceBandwidthWeighting": { + "type": "enum", + "members": { + "DEFAULT": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "default" + } + }, + "VPC_1": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "vpc-1" + } + }, + "EBS_1": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ebs-1" + } + } + } + }, "com.amazonaws.ec2#InstanceBlockDeviceMapping": { "type": "structure", "members": { @@ -63993,6 +64095,36 @@ } } }, + "com.amazonaws.ec2#InstanceNetworkPerformanceOptions": { + "type": "structure", + "members": { + "BandwidthWeighting": { + "target": "com.amazonaws.ec2#InstanceBandwidthWeighting", + "traits": { + "aws.protocols#ec2QueryName": "BandwidthWeighting", + "smithy.api#documentation": "

When you configure network bandwidth weighting, you can boost your baseline bandwidth for either \n \t\tnetworking or EBS by up to 25%. The total available baseline bandwidth for your instance remains \n \t\tthe same. The default option uses the standard bandwidth configuration for your instance type.

", + "smithy.api#xmlName": "bandwidthWeighting" + } + } + }, + "traits": { + "smithy.api#documentation": "

With network performance options, you can adjust your bandwidth preferences to meet \n \t\tthe needs of the workload that runs on your instance.

" + } + }, + "com.amazonaws.ec2#InstanceNetworkPerformanceOptionsRequest": { + "type": "structure", + "members": { + "BandwidthWeighting": { + "target": "com.amazonaws.ec2#InstanceBandwidthWeighting", + "traits": { + "smithy.api#documentation": "

Specify the bandwidth weighting option to boost the associated type of baseline bandwidth, \n \t\tas follows:

\n
\n
default
\n
\n

This option uses the standard bandwidth configuration for your instance type.

\n
\n
vpc-1
\n
\n

This option boosts your networking baseline bandwidth and reduces your EBS baseline \n \t\t\t\t\tbandwidth.

\n
\n
ebs-1
\n
\n

This option boosts your EBS baseline bandwidth and reduces your networking baseline \n \t\t\t\t\tbandwidth.

\n
\n
" + } + } + }, + "traits": { + "smithy.api#documentation": "

Configure network performance options for your instance that are geared towards performance \n \t\timprovements based on the workload that it runs.

" + } + }, "com.amazonaws.ec2#InstancePrivateIpAddress": { "type": "structure", "members": { @@ -76062,6 +76194,36 @@ } } }, + "com.amazonaws.ec2#LaunchTemplateNetworkPerformanceOptions": { + "type": "structure", + "members": { + "BandwidthWeighting": { + "target": "com.amazonaws.ec2#InstanceBandwidthWeighting", + "traits": { + "aws.protocols#ec2QueryName": "BandwidthWeighting", + "smithy.api#documentation": "

When you configure network bandwidth weighting, you can boost baseline bandwidth for either networking \n \t\tor EBS by up to 25%. The total available baseline bandwidth for your instance remains \n the same. The default option uses the standard bandwidth configuration for your instance type.

", + "smithy.api#xmlName": "bandwidthWeighting" + } + } + }, + "traits": { + "smithy.api#documentation": "

With network performance options, you can adjust your bandwidth preferences to meet \n \t\tthe needs of the workload that runs on your instance at launch.

" + } + }, + "com.amazonaws.ec2#LaunchTemplateNetworkPerformanceOptionsRequest": { + "type": "structure", + "members": { + "BandwidthWeighting": { + "target": "com.amazonaws.ec2#InstanceBandwidthWeighting", + "traits": { + "smithy.api#documentation": "

Specify the bandwidth weighting option to boost the associated type of baseline bandwidth, as follows:

\n
\n
default
\n
\n

This option uses the standard bandwidth configuration for your instance type.

\n
\n
vpc-1
\n
\n

This option boosts your networking baseline bandwidth and reduces your EBS \n \t\t\t\t\tbaseline bandwidth.

\n
\n
ebs-1
\n
\n

This option boosts your EBS baseline bandwidth and reduces your networking \n \t\t\t\t\tbaseline bandwidth.

\n
\n
" + } + } + }, + "traits": { + "smithy.api#documentation": "

When you configure network performance options in your launch template, your instance \n \t\tis geared for performance improvements based on the workload that it runs as soon as it's \n \t\tavailable.

" + } + }, "com.amazonaws.ec2#LaunchTemplateOverrides": { "type": "structure", "members": { @@ -80405,6 +80567,72 @@ "smithy.api#output": {} } }, + "com.amazonaws.ec2#ModifyInstanceNetworkPerformanceOptions": { + "type": "operation", + "input": { + "target": "com.amazonaws.ec2#ModifyInstanceNetworkPerformanceRequest" + }, + "output": { + "target": "com.amazonaws.ec2#ModifyInstanceNetworkPerformanceResult" + }, + "traits": { + "smithy.api#documentation": "

Change the configuration of the network performance options for an existing \n \tinstance.

" + } + }, + "com.amazonaws.ec2#ModifyInstanceNetworkPerformanceRequest": { + "type": "structure", + "members": { + "InstanceId": { + "target": "com.amazonaws.ec2#InstanceId", + "traits": { + "smithy.api#clientOptional": {}, + "smithy.api#documentation": "

The ID of the instance to update.

", + "smithy.api#required": {} + } + }, + "BandwidthWeighting": { + "target": "com.amazonaws.ec2#InstanceBandwidthWeighting", + "traits": { + "smithy.api#clientOptional": {}, + "smithy.api#documentation": "

Specify the bandwidth weighting option to boost the associated type of baseline bandwidth, as follows:

\n
\n
default
\n
\n

This option uses the standard bandwidth configuration for your instance type.

\n
\n
vpc-1
\n
\n

This option boosts your networking baseline bandwidth and reduces your EBS \n \t\t\t\t\tbaseline bandwidth.

\n
\n
ebs-1
\n
\n

This option boosts your EBS baseline bandwidth and reduces your networking \n \t\t\t\t\tbaseline bandwidth.

\n
\n
", + "smithy.api#required": {} + } + }, + "DryRun": { + "target": "com.amazonaws.ec2#Boolean", + "traits": { + "smithy.api#documentation": "

Checks whether you have the required permissions for the operation, without actually making the \n request, and provides an error response. If you have the required permissions, the error response is \n DryRunOperation. Otherwise, it is UnauthorizedOperation.

" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.ec2#ModifyInstanceNetworkPerformanceResult": { + "type": "structure", + "members": { + "InstanceId": { + "target": "com.amazonaws.ec2#InstanceId", + "traits": { + "aws.protocols#ec2QueryName": "InstanceId", + "smithy.api#documentation": "

The instance ID that was updated.

", + "smithy.api#xmlName": "instanceId" + } + }, + "BandwidthWeighting": { + "target": "com.amazonaws.ec2#InstanceBandwidthWeighting", + "traits": { + "aws.protocols#ec2QueryName": "BandwidthWeighting", + "smithy.api#documentation": "

Contains the updated configuration for bandwidth weighting on the specified instance.

", + "smithy.api#xmlName": "bandwidthWeighting" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.ec2#ModifyInstancePlacement": { "type": "operation", "input": { @@ -85695,6 +85923,14 @@ "smithy.api#documentation": "

Indicates whether the instance type supports ENA Express. ENA Express uses Amazon Web Services Scalable Reliable Datagram (SRD) technology to increase the maximum bandwidth used per stream\n and minimize tail latency of network traffic between EC2 instances.

", "smithy.api#xmlName": "enaSrdSupported" } + }, + "BandwidthWeightings": { + "target": "com.amazonaws.ec2#BandwidthWeightingTypeList", + "traits": { + "aws.protocols#ec2QueryName": "BandwidthWeightings", + "smithy.api#documentation": "

A list of valid settings for configurable bandwidth weighting for the instance\n \ttype, if supported.

", + "smithy.api#xmlName": "bandwidthWeightings" + } } }, "traits": { @@ -93600,6 +93836,12 @@ "traits": { "smithy.api#documentation": "

The entity that manages the launch template.

" } + }, + "NetworkPerformanceOptions": { + "target": "com.amazonaws.ec2#LaunchTemplateNetworkPerformanceOptionsRequest", + "traits": { + "smithy.api#documentation": "

Contains launch template settings to boost network performance for the type of \n \tworkload that runs on your instance.

" + } } }, "traits": { @@ -96348,6 +96590,14 @@ "smithy.api#documentation": "

The entity that manages the launch template.

", "smithy.api#xmlName": "operator" } + }, + "NetworkPerformanceOptions": { + "target": "com.amazonaws.ec2#LaunchTemplateNetworkPerformanceOptions", + "traits": { + "aws.protocols#ec2QueryName": "NetworkPerformanceOptions", + "smithy.api#documentation": "

Contains the launch template settings for network performance options for \n \tyour instance.

", + "smithy.api#xmlName": "networkPerformanceOptions" + } } }, "traits": { @@ -98025,6 +98275,12 @@ "smithy.api#documentation": "

If you’re launching an instance into a dual-stack or IPv6-only subnet, you can enable\n assigning a primary IPv6 address. A primary IPv6 address is an IPv6 GUA address\n associated with an ENI that you have enabled to use a primary IPv6 address. Use this\n option if an instance relies on its IPv6 address not changing. When you launch the\n instance, Amazon Web Services will automatically assign an IPv6 address associated with\n the ENI attached to your instance to be the primary IPv6 address. Once you enable an\n IPv6 GUA address to be a primary IPv6, you cannot disable it. When you enable an IPv6\n GUA address to be a primary IPv6, the first IPv6 GUA will be made the primary IPv6\n address until the instance is terminated or the network interface is detached. If you\n have multiple IPv6 addresses associated with an ENI attached to your instance and you\n enable a primary IPv6 address, the first IPv6 GUA address associated with the ENI\n becomes the primary IPv6 address.

" } }, + "NetworkPerformanceOptions": { + "target": "com.amazonaws.ec2#InstanceNetworkPerformanceOptionsRequest", + "traits": { + "smithy.api#documentation": "

Contains settings for the network performance options for the instance.

" + } + }, "Operator": { "target": "com.amazonaws.ec2#OperatorRequest", "traits": { @@ -100501,6 +100757,14 @@ "smithy.api#xmlName": "sseType" } }, + "AvailabilityZone": { + "target": "com.amazonaws.ec2#String", + "traits": { + "aws.protocols#ec2QueryName": "AvailabilityZone", + "smithy.api#documentation": "

The Availability Zone or Local Zone of the snapshot. For example, us-west-1a \n (Availability Zone) or us-west-2-lax-1a (Local Zone).

", + "smithy.api#xmlName": "availabilityZone" + } + }, "TransferType": { "target": "com.amazonaws.ec2#TransferType", "traits": { @@ -100917,6 +101181,14 @@ "smithy.api#documentation": "

Reserved for future use.

", "smithy.api#xmlName": "sseType" } + }, + "AvailabilityZone": { + "target": "com.amazonaws.ec2#String", + "traits": { + "aws.protocols#ec2QueryName": "AvailabilityZone", + "smithy.api#documentation": "

The Availability Zone or Local Zone of the snapshots. For example, us-west-1a \n (Availability Zone) or us-west-2-lax-1a (Local Zone).

", + "smithy.api#xmlName": "availabilityZone" + } } }, "traits": { @@ -100932,6 +101204,23 @@ } } }, + "com.amazonaws.ec2#SnapshotLocationEnum": { + "type": "enum", + "members": { + "REGIONAL": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "regional" + } + }, + "LOCAL": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "local" + } + } + } + }, "com.amazonaws.ec2#SnapshotRecycleBinInfo": { "type": "structure", "members": { @@ -102649,7 +102938,7 @@ "target": "com.amazonaws.ec2#StartDeclarativePoliciesReportResult" }, "traits": { - "smithy.api#documentation": "

Generates an account status report. The report is generated asynchronously, and can\n take several hours to complete.

\n

The report provides the current status of all attributes supported by declarative\n policies for the accounts within the specified scope. The scope is determined by the\n specified TargetId, which can represent an individual account, or all the\n accounts that fall under the specified organizational unit (OU) or root (the entire\n Amazon Web Services Organization).

\n

The report is saved to your specified S3 bucket, using the following path structure\n (with the italicized placeholders representing your specific\n values):

\n

\n s3://amzn-s3-demo-bucket/your-optional-s3-prefix/ec2_targetId_reportId_yyyyMMddThhmmZ.csv\n

\n

\n Prerequisites for generating a report\n

\n
    \n
  • \n

    The StartDeclarativePoliciesReport API can only be called by the\n management account or delegated administrators for the organization.

    \n
  • \n
  • \n

    An S3 bucket must be available before generating the report (you can create a\n new one or use an existing one), and it must have an appropriate bucket policy.\n For a sample S3 policy, see Sample Amazon S3 policy under\n .

    \n
  • \n
  • \n

    Trusted access must be enabled for the service for which the declarative\n policy will enforce a baseline configuration. If you use the Amazon Web Services Organizations\n console, this is done automatically when you enable declarative policies. The\n API uses the following service principal to identify the EC2 service:\n ec2.amazonaws.com. For more information on how to enable\n trusted access with the Amazon Web Services CLI and Amazon Web Services SDKs, see Using\n Organizations with other Amazon Web Services services in the\n Amazon Web Services Organizations User Guide.

    \n
  • \n
  • \n

    Only one report per organization can be generated at a time. Attempting to\n generate a report while another is in progress will result in an error.

    \n
  • \n
\n

For more information, including the required IAM permissions to run this API, see\n Generating the account status report for declarative policies in the\n Amazon Web Services Organizations User Guide.

" + "smithy.api#documentation": "

Generates an account status report. The report is generated asynchronously, and can\n take several hours to complete.

\n

The report provides the current status of all attributes supported by declarative\n policies for the accounts within the specified scope. The scope is determined by the\n specified TargetId, which can represent an individual account, or all the\n accounts that fall under the specified organizational unit (OU) or root (the entire\n Amazon Web Services Organization).

\n

The report is saved to your specified S3 bucket, using the following path structure\n (with the italicized placeholders representing your specific\n values):

\n

\n s3://amzn-s3-demo-bucket/your-optional-s3-prefix/ec2_targetId_reportId_yyyyMMddThhmmZ.csv\n

\n

\n Prerequisites for generating a report\n

\n
    \n
  • \n

    The StartDeclarativePoliciesReport API can only be called by the\n management account or delegated administrators for the organization.

    \n
  • \n
  • \n

    An S3 bucket must be available before generating the report (you can create a\n new one or use an existing one), it must be in the same Region where the report\n generation request is made, and it must have an appropriate bucket policy. For a\n sample S3 policy, see Sample Amazon S3 policy under .

    \n
  • \n
  • \n

    Trusted access must be enabled for the service for which the declarative\n policy will enforce a baseline configuration. If you use the Amazon Web Services Organizations\n console, this is done automatically when you enable declarative policies. The\n API uses the following service principal to identify the EC2 service:\n ec2.amazonaws.com. For more information on how to enable\n trusted access with the Amazon Web Services CLI and Amazon Web Services SDKs, see Using\n Organizations with other Amazon Web Services services in the\n Amazon Web Services Organizations User Guide.

    \n
  • \n
  • \n

    Only one report per organization can be generated at a time. Attempting to\n generate a report while another is in progress will result in an error.

    \n
  • \n
\n

For more information, including the required IAM permissions to run this API, see\n Generating the account status report for declarative policies in the\n Amazon Web Services Organizations User Guide.

" } }, "com.amazonaws.ec2#StartDeclarativePoliciesReportRequest": { @@ -102665,7 +102954,7 @@ "target": "com.amazonaws.ec2#String", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

The name of the S3 bucket where the report will be saved.

", + "smithy.api#documentation": "

The name of the S3 bucket where the report will be saved. The bucket must be in the\n same Region where the report generation request is made.

", "smithy.api#required": {} } }, diff --git a/models/ecs.json b/models/ecs.json index 81b6d75414..a18d106fa8 100644 --- a/models/ecs.json +++ b/models/ecs.json @@ -279,7 +279,7 @@ "name": "ecs" }, "aws.protocols#awsJson1_1": {}, - "smithy.api#documentation": "Amazon Elastic Container Service\n

Amazon Elastic Container Service (Amazon ECS) is a highly scalable, fast, container management service. It makes it easy to run,\n\t\t\tstop, and manage Docker containers. You can host your cluster on a serverless infrastructure that's\n\t\t\tmanaged by Amazon ECS by launching your services or tasks on Fargate. For more control, you can host your\n\t\t\ttasks on a cluster of Amazon Elastic Compute Cloud (Amazon EC2) or External (on-premises) instances that you manage.

\n

Amazon ECS makes it easy to launch and stop container-based applications with simple API calls. This makes\n\t\t\tit easy to get the state of your cluster from a centralized service, and gives you access to many\n\t\t\tfamiliar Amazon EC2 features.

\n

You can use Amazon ECS to schedule the placement of containers across your cluster based on your resource\n\t\t\tneeds, isolation policies, and availability requirements. With Amazon ECS, you don't need to operate your\n\t\t\town cluster management and configuration management systems. You also don't need to worry about scaling\n\t\t\tyour management infrastructure.

", + "smithy.api#documentation": "Amazon Elastic Container Service\n

Amazon Elastic Container Service (Amazon ECS) is a highly scalable, fast, container management service. It makes\n\t\t\tit easy to run, stop, and manage Docker containers. You can host your cluster on a\n\t\t\tserverless infrastructure that's managed by Amazon ECS by launching your services or tasks on\n\t\t\tFargate. For more control, you can host your tasks on a cluster of Amazon Elastic Compute Cloud (Amazon EC2)\n\t\t\tor External (on-premises) instances that you manage.

\n

Amazon ECS makes it easy to launch and stop container-based applications with simple API\n\t\t\tcalls. This makes it easy to get the state of your cluster from a centralized service,\n\t\t\tand gives you access to many familiar Amazon EC2 features.

\n

You can use Amazon ECS to schedule the placement of containers across your cluster based on\n\t\t\tyour resource needs, isolation policies, and availability requirements. With Amazon ECS, you\n\t\t\tdon't need to operate your own cluster management and configuration management systems.\n\t\t\tYou also don't need to worry about scaling your management infrastructure.

", "smithy.api#title": "Amazon EC2 Container Service", "smithy.api#xmlNamespace": { "uri": "http://ecs.amazonaws.com/doc/2014-11-13/" @@ -1336,13 +1336,13 @@ "status": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

The status of the attachment. Valid values are PRECREATED, CREATED,\n\t\t\t\tATTACHING, ATTACHED, DETACHING, DETACHED,\n\t\t\t\tDELETED, and FAILED.

" + "smithy.api#documentation": "

The status of the attachment. Valid values are PRECREATED,\n\t\t\t\tCREATED, ATTACHING, ATTACHED,\n\t\t\t\tDETACHING, DETACHED, DELETED, and\n\t\t\t\tFAILED.

" } }, "details": { "target": "com.amazonaws.ecs#AttachmentDetails", "traits": { - "smithy.api#documentation": "

Details of the attachment.

\n

For elastic network interfaces, this includes the network interface ID, the MAC address, the subnet\n\t\t\tID, and the private IPv4 address.

\n

For Service Connect services, this includes portName, clientAliases,\n\t\t\t\tdiscoveryName, and ingressPortOverride.

\n

For Elastic Block Storage, this includes roleArn, deleteOnTermination,\n\t\t\t\tvolumeName, volumeId, and statusReason (only when the\n\t\t\tattachment fails to create or attach).

" + "smithy.api#documentation": "

Details of the attachment.

\n

For elastic network interfaces, this includes the network interface ID, the MAC\n\t\t\taddress, the subnet ID, and the private IPv4 address.

\n

For Service Connect services, this includes portName,\n\t\t\t\tclientAliases, discoveryName, and\n\t\t\t\tingressPortOverride.

\n

For Elastic Block Storage, this includes roleArn,\n\t\t\t\tdeleteOnTermination, volumeName, volumeId,\n\t\t\tand statusReason (only when the attachment fails to create or\n\t\t\tattach).

" } } }, @@ -1396,31 +1396,31 @@ "name": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

The name of the attribute. The name must contain between 1 and 128 characters. The name\n\t\t\tmay contain letters (uppercase and lowercase), numbers, hyphens (-), underscores (_), forward slashes\n\t\t\t(/), back slashes (\\), or periods (.).

", + "smithy.api#documentation": "

The name of the attribute. The name must contain between 1 and 128\n\t\t\tcharacters. The name may contain letters (uppercase and lowercase), numbers, hyphens\n\t\t\t(-), underscores (_), forward slashes (/), back slashes (\\), or periods (.).

", "smithy.api#required": {} } }, "value": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

The value of the attribute. The value must contain between 1 and 128 characters. It can\n\t\t\tcontain letters (uppercase and lowercase), numbers, hyphens (-), underscores (_), periods (.), at signs\n\t\t\t(@), forward slashes (/), back slashes (\\), colons (:), or spaces. The value can't start or end with a\n\t\t\tspace.

" + "smithy.api#documentation": "

The value of the attribute. The value must contain between 1 and 128\n\t\t\tcharacters. It can contain letters (uppercase and lowercase), numbers, hyphens (-),\n\t\t\tunderscores (_), periods (.), at signs (@), forward slashes (/), back slashes (\\),\n\t\t\tcolons (:), or spaces. The value can't start or end with a space.

" } }, "targetType": { "target": "com.amazonaws.ecs#TargetType", "traits": { - "smithy.api#documentation": "

The type of the target to attach the attribute with. This parameter is required if you use the short\n\t\t\tform ID for a resource instead of the full ARN.

" + "smithy.api#documentation": "

The type of the target to attach the attribute with. This parameter is required if you\n\t\t\tuse the short form ID for a resource instead of the full ARN.

" } }, "targetId": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

The ID of the target. You can specify the short form ID for a resource or the full Amazon Resource Name (ARN).

" + "smithy.api#documentation": "

The ID of the target. You can specify the short form ID for a resource or the full\n\t\t\tAmazon Resource Name (ARN).

" } } }, "traits": { - "smithy.api#documentation": "

An attribute is a name-value pair that's associated with an Amazon ECS object. Use attributes to extend\n\t\t\tthe Amazon ECS data model by adding custom metadata to your resources. For more information, see Attributes\n\t\t\tin the Amazon Elastic Container Service Developer Guide.

" + "smithy.api#documentation": "

An attribute is a name-value pair that's associated with an Amazon ECS object. Use\n\t\t\tattributes to extend the Amazon ECS data model by adding custom metadata to your resources.\n\t\t\tFor more information, see Attributes in the Amazon Elastic Container Service Developer Guide.

" } }, "com.amazonaws.ecs#AttributeLimitExceededException": { @@ -1434,7 +1434,7 @@ } }, "traits": { - "smithy.api#documentation": "

You can apply up to 10 custom attributes for each resource. You can view the attributes of a resource\n\t\t\twith ListAttributes. You can remove existing attributes on a resource with DeleteAttributes.

", + "smithy.api#documentation": "

You can apply up to 10 custom attributes for each resource. You can view the\n\t\t\tattributes of a resource with ListAttributes.\n\t\t\tYou can remove existing attributes on a resource with DeleteAttributes.

", "smithy.api#error": "client" } }, @@ -1450,7 +1450,7 @@ "autoScalingGroupArn": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

The Amazon Resource Name (ARN) that identifies the Auto Scaling group, or the Auto Scaling group name.

", + "smithy.api#documentation": "

The Amazon Resource Name (ARN) that identifies the Auto Scaling group, or the Auto Scaling group\n\t\t\tname.

", "smithy.api#required": {} } }, @@ -1463,7 +1463,7 @@ "managedTerminationProtection": { "target": "com.amazonaws.ecs#ManagedTerminationProtection", "traits": { - "smithy.api#documentation": "

The managed termination protection setting to use for the Auto Scaling group capacity provider. This\n\t\t\tdetermines whether the Auto Scaling group has managed termination protection. The default is\n\t\t\toff.

\n \n

When using managed termination protection, managed scaling must also be used otherwise managed\n\t\t\t\ttermination protection doesn't work.

\n
\n

When managed termination protection is on, Amazon ECS prevents the Amazon EC2 instances in an Auto Scaling\n\t\t\tgroup that contain tasks from being terminated during a scale-in action. The Auto Scaling group and\n\t\t\teach instance in the Auto Scaling group must have instance protection from scale-in actions on as well.\n\t\t\tFor more information, see Instance\n\t\t\t\tProtection in the Auto Scaling User Guide.

\n

When managed termination protection is off, your Amazon EC2 instances aren't protected from termination\n\t\t\twhen the Auto Scaling group scales in.

" + "smithy.api#documentation": "

The managed termination protection setting to use for the Auto Scaling group capacity\n\t\t\tprovider. This determines whether the Auto Scaling group has managed termination\n\t\t\tprotection. The default is off.

\n \n

When using managed termination protection, managed scaling must also be used\n\t\t\t\totherwise managed termination protection doesn't work.

\n
\n

When managed termination protection is on, Amazon ECS prevents the Amazon EC2 instances in an\n\t\t\tAuto Scaling group that contain tasks from being terminated during a scale-in action.\n\t\t\tThe Auto Scaling group and each instance in the Auto Scaling group must have instance\n\t\t\tprotection from scale-in actions on as well. For more information, see Instance Protection in the Auto Scaling User Guide.

\n

When managed termination protection is off, your Amazon EC2 instances aren't protected from\n\t\t\ttermination when the Auto Scaling group scales in.

" } }, "managedDraining": { @@ -1489,7 +1489,7 @@ "managedTerminationProtection": { "target": "com.amazonaws.ecs#ManagedTerminationProtection", "traits": { - "smithy.api#documentation": "

The managed termination protection setting to use for the Auto Scaling group capacity provider. This\n\t\t\tdetermines whether the Auto Scaling group has managed termination protection.

\n \n

When using managed termination protection, managed scaling must also be used otherwise managed\n\t\t\t\ttermination protection doesn't work.

\n
\n

When managed termination protection is on, Amazon ECS prevents the Amazon EC2 instances in an Auto Scaling\n\t\t\tgroup that contain tasks from being terminated during a scale-in action. The Auto Scaling group and\n\t\t\teach instance in the Auto Scaling group must have instance protection from scale-in actions on. For\n\t\t\tmore information, see Instance\n\t\t\t\tProtection in the Auto Scaling User Guide.

\n

When managed termination protection is off, your Amazon EC2 instances aren't protected from termination\n\t\t\twhen the Auto Scaling group scales in.

" + "smithy.api#documentation": "

The managed termination protection setting to use for the Auto Scaling group capacity\n\t\t\tprovider. This determines whether the Auto Scaling group has managed termination\n\t\t\tprotection.

\n \n

When using managed termination protection, managed scaling must also be used\n\t\t\t\totherwise managed termination protection doesn't work.

\n
\n

When managed termination protection is on, Amazon ECS prevents the Amazon EC2 instances in an\n\t\t\tAuto Scaling group that contain tasks from being terminated during a scale-in action.\n\t\t\tThe Auto Scaling group and each instance in the Auto Scaling group must have instance\n\t\t\tprotection from scale-in actions on. For more information, see Instance Protection in the Auto Scaling User Guide.

\n

When managed termination protection is off, your Amazon EC2 instances aren't protected from\n\t\t\ttermination when the Auto Scaling group scales in.

" } }, "managedDraining": { @@ -1526,20 +1526,20 @@ "subnets": { "target": "com.amazonaws.ecs#StringList", "traits": { - "smithy.api#documentation": "

The IDs of the subnets associated with the task or service. There's a limit of 16 subnets that can be\n\t\t\tspecified per awsvpcConfiguration.

\n \n

All specified subnets must be from the same VPC.

\n
", + "smithy.api#documentation": "

The IDs of the subnets associated with the task or service. There's a limit of 16\n\t\t\tsubnets that can be specified per awsvpcConfiguration.

\n \n

All specified subnets must be from the same VPC.

\n
", "smithy.api#required": {} } }, "securityGroups": { "target": "com.amazonaws.ecs#StringList", "traits": { - "smithy.api#documentation": "

The IDs of the security groups associated with the task or service. If you don't specify a security\n\t\t\tgroup, the default security group for the VPC is used. There's a limit of 5 security groups that can be\n\t\t\tspecified per awsvpcConfiguration.

\n \n

All specified security groups must be from the same VPC.

\n
" + "smithy.api#documentation": "

The IDs of the security groups associated with the task or service. If you don't\n\t\t\tspecify a security group, the default security group for the VPC is used. There's a\n\t\t\tlimit of 5 security groups that can be specified per\n\t\t\tawsvpcConfiguration.

\n \n

All specified security groups must be from the same VPC.

\n
" } }, "assignPublicIp": { "target": "com.amazonaws.ecs#AssignPublicIp", "traits": { - "smithy.api#documentation": "

Whether the task's elastic network interface receives a public IP address. The default value is\n\t\t\t\tENABLED.

" + "smithy.api#documentation": "

Whether the task's elastic network interface receives a public IP address. The default\n\t\t\tvalue is ENABLED.

" } } }, @@ -1609,7 +1609,7 @@ "status": { "target": "com.amazonaws.ecs#CapacityProviderStatus", "traits": { - "smithy.api#documentation": "

The current status of the capacity provider. Only capacity providers in an ACTIVE state\n\t\t\tcan be used in a cluster. When a capacity provider is successfully deleted, it has an\n\t\t\t\tINACTIVE status.

" + "smithy.api#documentation": "

The current status of the capacity provider. Only capacity providers in an\n\t\t\t\tACTIVE state can be used in a cluster. When a capacity provider is\n\t\t\tsuccessfully deleted, it has an INACTIVE status.

" } }, "autoScalingGroupProvider": { @@ -1621,19 +1621,19 @@ "updateStatus": { "target": "com.amazonaws.ecs#CapacityProviderUpdateStatus", "traits": { - "smithy.api#documentation": "

The update status of the capacity provider. The following are the possible states that is\n\t\t\treturned.

\n
\n
DELETE_IN_PROGRESS
\n
\n

The capacity provider is in the process of being deleted.

\n
\n
DELETE_COMPLETE
\n
\n

The capacity provider was successfully deleted and has an INACTIVE\n\t\t\t\t\t\tstatus.

\n
\n
DELETE_FAILED
\n
\n

The capacity provider can't be deleted. The update status reason provides further details\n\t\t\t\t\t\tabout why the delete failed.

\n
\n
" + "smithy.api#documentation": "

The update status of the capacity provider. The following are the possible states that\n\t\t\tis returned.

\n
\n
DELETE_IN_PROGRESS
\n
\n

The capacity provider is in the process of being deleted.

\n
\n
DELETE_COMPLETE
\n
\n

The capacity provider was successfully deleted and has an\n\t\t\t\t\t\t\tINACTIVE status.

\n
\n
DELETE_FAILED
\n
\n

The capacity provider can't be deleted. The update status reason provides\n\t\t\t\t\t\tfurther details about why the delete failed.

\n
\n
" } }, "updateStatusReason": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

The update status reason. This provides further details about the update status for the capacity\n\t\t\tprovider.

" + "smithy.api#documentation": "

The update status reason. This provides further details about the update status for\n\t\t\tthe capacity provider.

" } }, "tags": { "target": "com.amazonaws.ecs#Tags", "traits": { - "smithy.api#documentation": "

The metadata that you apply to the capacity provider to help you categorize and organize it. Each tag\n\t\t\tconsists of a key and an optional value. You define both.

\n

The following basic restrictions apply to tags:

\n
    \n
  • \n

    Maximum number of tags per resource - 50

    \n
  • \n
  • \n

    For each resource, each tag key must be unique, and each tag key can have only\n one value.

    \n
  • \n
  • \n

    Maximum key length - 128 Unicode characters in UTF-8

    \n
  • \n
  • \n

    Maximum value length - 256 Unicode characters in UTF-8

    \n
  • \n
  • \n

    If your tagging schema is used across multiple services and resources,\n remember that other services may have restrictions on allowed characters.\n Generally allowed characters are: letters, numbers, and spaces representable in\n UTF-8, and the following characters: + - = . _ : / @.

    \n
  • \n
  • \n

    Tag keys and values are case-sensitive.

    \n
  • \n
  • \n

    Do not use aws:, AWS:, or any upper or lowercase\n combination of such as a prefix for either keys or values as it is reserved for\n Amazon Web Services use. You cannot edit or delete tag keys or values with this prefix. Tags with\n this prefix do not count against your tags per resource limit.

    \n
  • \n
" + "smithy.api#documentation": "

The metadata that you apply to the capacity provider to help you categorize and\n\t\t\torganize it. Each tag consists of a key and an optional value. You define both.

\n

The following basic restrictions apply to tags:

\n
    \n
  • \n

    Maximum number of tags per resource - 50

    \n
  • \n
  • \n

    For each resource, each tag key must be unique, and each tag key can have only\n one value.

    \n
  • \n
  • \n

    Maximum key length - 128 Unicode characters in UTF-8

    \n
  • \n
  • \n

    Maximum value length - 256 Unicode characters in UTF-8

    \n
  • \n
  • \n

    If your tagging schema is used across multiple services and resources,\n remember that other services may have restrictions on allowed characters.\n Generally allowed characters are: letters, numbers, and spaces representable in\n UTF-8, and the following characters: + - = . _ : / @.

    \n
  • \n
  • \n

    Tag keys and values are case-sensitive.

    \n
  • \n
  • \n

    Do not use aws:, AWS:, or any upper or lowercase\n combination of such as a prefix for either keys or values as it is reserved for\n Amazon Web Services use. You cannot edit or delete tag keys or values with this prefix. Tags with\n this prefix do not count against your tags per resource limit.

    \n
  • \n
" } } }, @@ -1695,19 +1695,19 @@ "target": "com.amazonaws.ecs#CapacityProviderStrategyItemWeight", "traits": { "smithy.api#default": 0, - "smithy.api#documentation": "

The weight value designates the relative percentage of the total number of tasks\n\t\t\tlaunched that should use the specified capacity provider. The weight value is taken into\n\t\t\tconsideration after the base value, if defined, is satisfied.

\n

If no weight value is specified, the default value of 0 is used. When\n\t\t\tmultiple capacity providers are specified within a capacity provider strategy, at least one of the\n\t\t\tcapacity providers must have a weight value greater than zero and any capacity providers with a weight\n\t\t\tof 0 can't be used to place tasks. If you specify multiple capacity providers in a\n\t\t\tstrategy that all have a weight of 0, any RunTask or\n\t\t\t\tCreateService actions using the capacity provider strategy will fail.

\n

An example scenario for using weights is defining a strategy that contains two capacity providers and\n\t\t\tboth have a weight of 1, then when the base is satisfied, the tasks will be\n\t\t\tsplit evenly across the two capacity providers. Using that same logic, if you specify a weight of\n\t\t\t\t1 for capacityProviderA and a weight of 4 for\n\t\t\t\tcapacityProviderB, then for every one task that's run using\n\t\t\t\tcapacityProviderA, four tasks would use\n\t\t\tcapacityProviderB.

" + "smithy.api#documentation": "

The weight value designates the relative percentage of the total\n\t\t\tnumber of tasks launched that should use the specified capacity provider. The\n\t\t\t\tweight value is taken into consideration after the base\n\t\t\tvalue, if defined, is satisfied.

\n

If no weight value is specified, the default value of 0 is\n\t\t\tused. When multiple capacity providers are specified within a capacity provider\n\t\t\tstrategy, at least one of the capacity providers must have a weight value greater than\n\t\t\tzero and any capacity providers with a weight of 0 can't be used to place\n\t\t\ttasks. If you specify multiple capacity providers in a strategy that all have a weight\n\t\t\tof 0, any RunTask or CreateService actions using\n\t\t\tthe capacity provider strategy will fail.

\n

An example scenario for using weights is defining a strategy that contains two\n\t\t\tcapacity providers and both have a weight of 1, then when the\n\t\t\t\tbase is satisfied, the tasks will be split evenly across the two\n\t\t\tcapacity providers. Using that same logic, if you specify a weight of 1 for\n\t\t\t\tcapacityProviderA and a weight of 4 for\n\t\t\t\tcapacityProviderB, then for every one task that's run using\n\t\t\t\tcapacityProviderA, four tasks would use\n\t\t\t\tcapacityProviderB.

" } }, "base": { "target": "com.amazonaws.ecs#CapacityProviderStrategyItemBase", "traits": { "smithy.api#default": 0, - "smithy.api#documentation": "

The base value designates how many tasks, at a minimum, to run on the specified\n\t\t\tcapacity provider. Only one capacity provider in a capacity provider strategy can have a\n\t\t\t\tbase defined. If no value is specified, the default value of 0 is\n\t\t\tused.

" + "smithy.api#documentation": "

The base value designates how many tasks, at a minimum, to run on\n\t\t\tthe specified capacity provider. Only one capacity provider in a capacity provider\n\t\t\tstrategy can have a base defined. If no value is specified, the\n\t\t\tdefault value of 0 is used.

" } } }, "traits": { - "smithy.api#documentation": "

The details of a capacity provider strategy. A capacity provider strategy can be set when using the\n\t\t\t\tRunTaskor\n\t\t\t\tCreateCluster APIs or as the default capacity provider strategy for a cluster with the\n\t\t\t\tCreateCluster API.

\n

Only capacity providers that are already associated with a cluster and have an ACTIVE or\n\t\t\t\tUPDATING status can be used in a capacity provider strategy. The PutClusterCapacityProviders API is used to associate a capacity provider with a\n\t\t\tcluster.

\n

If specifying a capacity provider that uses an Auto Scaling group, the capacity provider must already\n\t\t\tbe created. New Auto Scaling group capacity providers can be created with the CreateClusterCapacityProvider API operation.

\n

To use a Fargate capacity provider, specify either the FARGATE or\n\t\t\t\tFARGATE_SPOT capacity providers. The Fargate capacity providers are available to all\n\t\t\taccounts and only need to be associated with a cluster to be used in a capacity provider\n\t\t\tstrategy.

\n

With FARGATE_SPOT, you can run interruption tolerant tasks at a rate that's discounted\n\t\t\tcompared to the FARGATE price. FARGATE_SPOT runs tasks on spare compute\n\t\t\tcapacity. When Amazon Web Services needs the capacity back, your tasks are interrupted with a two-minute warning.\n\t\t\t\tFARGATE_SPOT supports Linux tasks with the X86_64 architecture on platform version\n\t\t\t1.3.0 or later. FARGATE_SPOT supports Linux tasks with the ARM64 architecture on platform\n\t\t\tversion 1.4.0 or later.

\n

A capacity provider strategy may contain a maximum of 6 capacity providers.

" + "smithy.api#documentation": "

The details of a capacity provider strategy. A capacity provider strategy can be set\n\t\t\twhen using the RunTaskor CreateCluster APIs or as the default capacity provider strategy for a\n\t\t\tcluster with the CreateCluster API.

\n

Only capacity providers that are already associated with a cluster and have an\n\t\t\t\tACTIVE or UPDATING status can be used in a capacity\n\t\t\tprovider strategy. The PutClusterCapacityProviders API is used to associate a capacity provider\n\t\t\twith a cluster.

\n

If specifying a capacity provider that uses an Auto Scaling group, the capacity\n\t\t\tprovider must already be created. New Auto Scaling group capacity providers can be\n\t\t\tcreated with the CreateClusterCapacityProvider API operation.

\n

To use a Fargate capacity provider, specify either the FARGATE or\n\t\t\t\tFARGATE_SPOT capacity providers. The Fargate capacity providers are\n\t\t\tavailable to all accounts and only need to be associated with a cluster to be used in a\n\t\t\tcapacity provider strategy.

\n

With FARGATE_SPOT, you can run interruption tolerant tasks at a rate\n\t\t\tthat's discounted compared to the FARGATE price. FARGATE_SPOT\n\t\t\truns tasks on spare compute capacity. When Amazon Web Services needs the capacity back, your tasks are\n\t\t\tinterrupted with a two-minute warning. FARGATE_SPOT supports Linux tasks\n\t\t\twith the X86_64 architecture on platform version 1.3.0 or later.\n\t\t\t\tFARGATE_SPOT supports Linux tasks with the ARM64 architecture on\n\t\t\tplatform version 1.4.0 or later.

\n

A capacity provider strategy can contain a maximum of 20 capacity providers.

" } }, "com.amazonaws.ecs#CapacityProviderStrategyItemBase": { @@ -1788,7 +1788,7 @@ } }, "traits": { - "smithy.api#documentation": "

These errors are usually caused by a client action. This client action might be using an action or\n\t\t\tresource on behalf of a user that doesn't have permissions to use the action or resource. Or, it might\n\t\t\tbe specifying an identifier that isn't valid.

\n

The following list includes additional causes for the error:

\n
    \n
  • \n

    The RunTask could not be processed because you use managed scaling and there is\n\t\t\t\t\ta capacity error because the quota of tasks in the PROVISIONING per cluster has\n\t\t\t\t\tbeen reached. For information about the service quotas, see Amazon ECS service\n\t\t\t\t\t\tquotas.

    \n
  • \n
", + "smithy.api#documentation": "

These errors are usually caused by a client action. This client action might be using\n\t\t\tan action or resource on behalf of a user that doesn't have permissions to use the\n\t\t\taction or resource. Or, it might be specifying an identifier that isn't valid.

\n

The following list includes additional causes for the error:

\n
    \n
  • \n

    The RunTask could not be processed because you use managed\n\t\t\t\t\tscaling and there is a capacity error because the quota of tasks in the\n\t\t\t\t\t\tPROVISIONING per cluster has been reached. For information\n\t\t\t\t\tabout the service quotas, see Amazon ECS\n\t\t\t\t\t\tservice quotas.

    \n
  • \n
", "smithy.api#error": "client" } }, @@ -1798,7 +1798,7 @@ "clusterArn": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

The Amazon Resource Name (ARN) that identifies the cluster. For more information about the ARN format, see Amazon Resource Name (ARN)\n\t\t\tin the Amazon ECS Developer Guide.

" + "smithy.api#documentation": "

The Amazon Resource Name (ARN) that identifies the cluster. For more information about the ARN\n\t\t\tformat, see Amazon Resource Name (ARN) in the Amazon ECS Developer Guide.

" } }, "clusterName": { @@ -1816,14 +1816,14 @@ "status": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

The status of the cluster. The following are the possible states that are returned.

\n
\n
ACTIVE
\n
\n

The cluster is ready to accept tasks and if applicable you can register container\n\t\t\t\t\t\tinstances with the cluster.

\n
\n
PROVISIONING
\n
\n

The cluster has capacity providers that are associated with it and the resources needed\n\t\t\t\t\t\tfor the capacity provider are being created.

\n
\n
DEPROVISIONING
\n
\n

The cluster has capacity providers that are associated with it and the resources needed\n\t\t\t\t\t\tfor the capacity provider are being deleted.

\n
\n
FAILED
\n
\n

The cluster has capacity providers that are associated with it and the resources needed\n\t\t\t\t\t\tfor the capacity provider have failed to create.

\n
\n
INACTIVE
\n
\n

The cluster has been deleted. Clusters with an INACTIVE status may remain\n\t\t\t\t\t\tdiscoverable in your account for a period of time. However, this behavior is subject to\n\t\t\t\t\t\tchange in the future. We don't recommend that you rely on INACTIVE clusters\n\t\t\t\t\t\tpersisting.

\n
\n
" + "smithy.api#documentation": "

The status of the cluster. The following are the possible states that are\n\t\t\treturned.

\n
\n
ACTIVE
\n
\n

The cluster is ready to accept tasks and if applicable you can register\n\t\t\t\t\t\tcontainer instances with the cluster.

\n
\n
PROVISIONING
\n
\n

The cluster has capacity providers that are associated with it and the\n\t\t\t\t\t\tresources needed for the capacity provider are being created.

\n
\n
DEPROVISIONING
\n
\n

The cluster has capacity providers that are associated with it and the\n\t\t\t\t\t\tresources needed for the capacity provider are being deleted.

\n
\n
FAILED
\n
\n

The cluster has capacity providers that are associated with it and the\n\t\t\t\t\t\tresources needed for the capacity provider have failed to create.

\n
\n
INACTIVE
\n
\n

The cluster has been deleted. Clusters with an INACTIVE\n\t\t\t\t\t\tstatus may remain discoverable in your account for a period of time.\n\t\t\t\t\t\tHowever, this behavior is subject to change in the future. We don't\n\t\t\t\t\t\trecommend that you rely on INACTIVE clusters persisting.

\n
\n
" } }, "registeredContainerInstancesCount": { "target": "com.amazonaws.ecs#Integer", "traits": { "smithy.api#default": 0, - "smithy.api#documentation": "

The number of container instances registered into the cluster. This includes container instances in\n\t\t\tboth ACTIVE and DRAINING status.

" + "smithy.api#documentation": "

The number of container instances registered into the cluster. This includes container\n\t\t\tinstances in both ACTIVE and DRAINING status.

" } }, "runningTasksCount": { @@ -1844,25 +1844,25 @@ "target": "com.amazonaws.ecs#Integer", "traits": { "smithy.api#default": 0, - "smithy.api#documentation": "

The number of services that are running on the cluster in an ACTIVE state. You can view\n\t\t\tthese services with PListServices.

" + "smithy.api#documentation": "

The number of services that are running on the cluster in an ACTIVE\n\t\t\tstate. You can view these services with PListServices.

" } }, "statistics": { "target": "com.amazonaws.ecs#Statistics", "traits": { - "smithy.api#documentation": "

Additional information about your clusters that are separated by launch type. They include the\n\t\t\tfollowing:

\n
    \n
  • \n

    runningEC2TasksCount

    \n
  • \n
  • \n

    RunningFargateTasksCount

    \n
  • \n
  • \n

    pendingEC2TasksCount

    \n
  • \n
  • \n

    pendingFargateTasksCount

    \n
  • \n
  • \n

    activeEC2ServiceCount

    \n
  • \n
  • \n

    activeFargateServiceCount

    \n
  • \n
  • \n

    drainingEC2ServiceCount

    \n
  • \n
  • \n

    drainingFargateServiceCount

    \n
  • \n
" + "smithy.api#documentation": "

Additional information about your clusters that are separated by launch type. They\n\t\t\tinclude the following:

\n
    \n
  • \n

    runningEC2TasksCount

    \n
  • \n
  • \n

    RunningFargateTasksCount

    \n
  • \n
  • \n

    pendingEC2TasksCount

    \n
  • \n
  • \n

    pendingFargateTasksCount

    \n
  • \n
  • \n

    activeEC2ServiceCount

    \n
  • \n
  • \n

    activeFargateServiceCount

    \n
  • \n
  • \n

    drainingEC2ServiceCount

    \n
  • \n
  • \n

    drainingFargateServiceCount

    \n
  • \n
" } }, "tags": { "target": "com.amazonaws.ecs#Tags", "traits": { - "smithy.api#documentation": "

The metadata that you apply to the cluster to help you categorize and organize them. Each tag\n\t\t\tconsists of a key and an optional value. You define both.

\n

The following basic restrictions apply to tags:

\n
    \n
  • \n

    Maximum number of tags per resource - 50

    \n
  • \n
  • \n

    For each resource, each tag key must be unique, and each tag key can have only\n one value.

    \n
  • \n
  • \n

    Maximum key length - 128 Unicode characters in UTF-8

    \n
  • \n
  • \n

    Maximum value length - 256 Unicode characters in UTF-8

    \n
  • \n
  • \n

    If your tagging schema is used across multiple services and resources,\n remember that other services may have restrictions on allowed characters.\n Generally allowed characters are: letters, numbers, and spaces representable in\n UTF-8, and the following characters: + - = . _ : / @.

    \n
  • \n
  • \n

    Tag keys and values are case-sensitive.

    \n
  • \n
  • \n

    Do not use aws:, AWS:, or any upper or lowercase\n combination of such as a prefix for either keys or values as it is reserved for\n Amazon Web Services use. You cannot edit or delete tag keys or values with this prefix. Tags with\n this prefix do not count against your tags per resource limit.

    \n
  • \n
" + "smithy.api#documentation": "

The metadata that you apply to the cluster to help you categorize and organize them.\n\t\t\tEach tag consists of a key and an optional value. You define both.

\n

The following basic restrictions apply to tags:

\n
    \n
  • \n

    Maximum number of tags per resource - 50

    \n
  • \n
  • \n

    For each resource, each tag key must be unique, and each tag key can have only\n one value.

    \n
  • \n
  • \n

    Maximum key length - 128 Unicode characters in UTF-8

    \n
  • \n
  • \n

    Maximum value length - 256 Unicode characters in UTF-8

    \n
  • \n
  • \n

    If your tagging schema is used across multiple services and resources,\n remember that other services may have restrictions on allowed characters.\n Generally allowed characters are: letters, numbers, and spaces representable in\n UTF-8, and the following characters: + - = . _ : / @.

    \n
  • \n
  • \n

    Tag keys and values are case-sensitive.

    \n
  • \n
  • \n

    Do not use aws:, AWS:, or any upper or lowercase\n combination of such as a prefix for either keys or values as it is reserved for\n Amazon Web Services use. You cannot edit or delete tag keys or values with this prefix. Tags with\n this prefix do not count against your tags per resource limit.

    \n
  • \n
" } }, "settings": { "target": "com.amazonaws.ecs#ClusterSettings", "traits": { - "smithy.api#documentation": "

The settings for the cluster. This parameter indicates whether CloudWatch Container Insights is on or off\n\t\t\tfor a cluster.

" + "smithy.api#documentation": "

The settings for the cluster. This parameter indicates whether CloudWatch Container Insights\n\t\t\tis on or off for a cluster.

" } }, "capacityProviders": { @@ -1874,19 +1874,19 @@ "defaultCapacityProviderStrategy": { "target": "com.amazonaws.ecs#CapacityProviderStrategy", "traits": { - "smithy.api#documentation": "

The default capacity provider strategy for the cluster. When services or tasks are run in the cluster\n\t\t\twith no launch type or capacity provider strategy specified, the default capacity provider strategy is\n\t\t\tused.

" + "smithy.api#documentation": "

The default capacity provider strategy for the cluster. When services or tasks are run\n\t\t\tin the cluster with no launch type or capacity provider strategy specified, the default\n\t\t\tcapacity provider strategy is used.

" } }, "attachments": { "target": "com.amazonaws.ecs#Attachments", "traits": { - "smithy.api#documentation": "

The resources attached to a cluster. When using a capacity provider with a cluster, the capacity\n\t\t\tprovider and associated resources are returned as cluster attachments.

" + "smithy.api#documentation": "

The resources attached to a cluster. When using a capacity provider with a cluster,\n\t\t\tthe capacity provider and associated resources are returned as cluster\n\t\t\tattachments.

" } }, "attachmentsStatus": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

The status of the capacity providers associated with the cluster. The following are the states that\n\t\t\tare returned.

\n
\n
UPDATE_IN_PROGRESS
\n
\n

The available capacity providers for the cluster are updating.

\n
\n
UPDATE_COMPLETE
\n
\n

The capacity providers have successfully updated.

\n
\n
UPDATE_FAILED
\n
\n

The capacity provider updates failed.

\n
\n
" + "smithy.api#documentation": "

The status of the capacity providers associated with the cluster. The following are\n\t\t\tthe states that are returned.

\n
\n
UPDATE_IN_PROGRESS
\n
\n

The available capacity providers for the cluster are updating.

\n
\n
UPDATE_COMPLETE
\n
\n

The capacity providers have successfully updated.

\n
\n
UPDATE_FAILED
\n
\n

The capacity provider updates failed.

\n
\n
" } }, "serviceConnectDefaults": { @@ -1897,7 +1897,7 @@ } }, "traits": { - "smithy.api#documentation": "

A regional grouping of one or more container instances where you can run task requests. Each account\n\t\t\treceives a default cluster the first time you use the Amazon ECS service, but you may also create other\n\t\t\tclusters. Clusters may contain more than one instance type simultaneously.

" + "smithy.api#documentation": "

A regional grouping of one or more container instances where you can run task\n\t\t\trequests. Each account receives a default cluster the first time you use the Amazon ECS\n\t\t\tservice, but you may also create other clusters. Clusters may contain more than one\n\t\t\tinstance type simultaneously.

" } }, "com.amazonaws.ecs#ClusterConfiguration": { @@ -1931,7 +1931,7 @@ } }, "traits": { - "smithy.api#documentation": "

You can't delete a cluster that has registered container instances. First, deregister the container\n\t\t\tinstances before you can delete the cluster. For more information, see DeregisterContainerInstance.

", + "smithy.api#documentation": "

You can't delete a cluster that has registered container instances. First, deregister\n\t\t\tthe container instances before you can delete the cluster. For more information, see\n\t\t\t\tDeregisterContainerInstance.

", "smithy.api#error": "client" } }, @@ -1946,7 +1946,7 @@ } }, "traits": { - "smithy.api#documentation": "

You can't delete a cluster that contains services. First, update the service to reduce its desired\n\t\t\ttask count to 0, and then delete the service. For more information, see UpdateService and DeleteService.

", + "smithy.api#documentation": "

You can't delete a cluster that contains services. First, update the service to reduce\n\t\t\tits desired task count to 0, and then delete the service. For more information, see\n\t\t\t\tUpdateService and\n\t\t\t\tDeleteService.

", "smithy.api#error": "client" } }, @@ -2027,7 +2027,7 @@ "namespace": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

The namespace name or full Amazon Resource Name (ARN) of the Cloud Map namespace. When you create a service and don't specify a Service Connect\n\t\t\tconfiguration, this namespace is used.

" + "smithy.api#documentation": "

The namespace name or full Amazon Resource Name (ARN) of the Cloud Map namespace. When you create a service and don't specify a\n\t\t\tService Connect configuration, this namespace is used.

" } } }, @@ -2041,7 +2041,7 @@ "namespace": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

The namespace name or full Amazon Resource Name (ARN) of the Cloud Map namespace that's used when you create a service and don't specify a\n\t\t\tService Connect configuration. The namespace name can include up to 1024 characters. The name is\n\t\t\tcase-sensitive. The name can't include hyphens (-), tilde (~), greater than (>), less than (<),\n\t\t\tor slash (/).

\n

If you enter an existing namespace name or ARN, then that namespace will be used. Any namespace\n\t\t\ttype is supported. The namespace must be in this account and this Amazon Web Services Region.

\n

If you enter a new name, a Cloud Map namespace will be created. Amazon ECS creates a Cloud Map namespace\n\t\t\twith the \"API calls\" method of instance discovery only. This instance discovery method is the \"HTTP\"\n\t\t\tnamespace type in the Command Line Interface. Other types of instance discovery aren't used by\n\t\t\tService Connect.

\n

If you update the cluster with an empty string \"\" for the namespace name, the cluster\n\t\t\tconfiguration for Service Connect is removed. Note that the namespace will remain in Cloud Map and must\n\t\t\tbe deleted separately.

\n

For more information about Cloud Map, see Working with Services in the\n\t\t\tCloud Map Developer Guide.

", + "smithy.api#documentation": "

The namespace name or full Amazon Resource Name (ARN) of the Cloud Map namespace that's used when you create a service and don't specify\n\t\t\ta Service Connect configuration. The namespace name can include up to 1024 characters.\n\t\t\tThe name is case-sensitive. The name can't include hyphens (-), tilde (~), greater than\n\t\t\t(>), less than (<), or slash (/).

\n

If you enter an existing namespace name or ARN, then that namespace will be used.\n\t\t\tAny namespace type is supported. The namespace must be in this account and this Amazon Web Services\n\t\t\tRegion.

\n

If you enter a new name, a Cloud Map namespace will be created. Amazon ECS creates a\n\t\t\tCloud Map namespace with the \"API calls\" method of instance discovery only. This instance\n\t\t\tdiscovery method is the \"HTTP\" namespace type in the Command Line Interface. Other types of instance\n\t\t\tdiscovery aren't used by Service Connect.

\n

If you update the cluster with an empty string \"\" for the namespace name,\n\t\t\tthe cluster configuration for Service Connect is removed. Note that the namespace will\n\t\t\tremain in Cloud Map and must be deleted separately.

\n

For more information about Cloud Map, see Working with Services\n\t\t\tin the Cloud Map Developer Guide.

", "smithy.api#required": {} } } @@ -2062,12 +2062,12 @@ "value": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

The value to set for the cluster setting. The supported values are enhanced,\n\t\t\t\tenabled, and disabled.

\n

To use Container Insights with enhanced observability, set the\n\t\t\t\tcontainerInsights account setting to enhanced.

\n

To use Container Insights, set the containerInsights account setting to\n\t\t\t\tenabled.

\n

If a cluster value is specified, it will override the containerInsights value\n\t\t\tset with PutAccountSetting or PutAccountSettingDefault.

" + "smithy.api#documentation": "

The value to set for the cluster setting. The supported values are\n\t\t\t\tenhanced, enabled, and disabled.

\n

To use Container Insights with enhanced observability, set the\n\t\t\t\tcontainerInsights account setting to enhanced.

\n

To use Container Insights, set the containerInsights account setting to\n\t\t\t\tenabled.

\n

If a cluster value is specified, it will override the containerInsights\n\t\t\tvalue set with PutAccountSetting or PutAccountSettingDefault.

" } } }, "traits": { - "smithy.api#documentation": "

The settings to use when creating a cluster. This parameter is used to turn on CloudWatch Container\n\t\t\tInsights with enhanced observability or CloudWatch Container\n\t\t\tInsights for a cluster.

\n

Container Insights with enhanced observability provides all the Container Insights metrics,\n\t\t\tplus additional task and container metrics. This version supports enhanced observability\n\t\t\tfor Amazon ECS clusters using the Amazon EC2 and Fargate launch types. After you configure\n\t\t\tContainer Insights with enhanced observability on Amazon ECS, Container Insights\n\t\t\tauto-collects detailed infrastructure telemetry from the cluster level down to the container\n\t\t\tlevel in your environment and displays these critical performance data in curated\n\t\t\tdashboards removing the heavy lifting in observability set-up.

\n

For more information, see Monitor\n\t\t\t\tAmazon ECS containers using Container Insights with enhanced observability in the\n\t\t\tAmazon Elastic Container Service Developer Guide.

" + "smithy.api#documentation": "

The settings to use when creating a cluster. This parameter is used to turn on CloudWatch\n\t\t\tContainer Insights with enhanced observability or CloudWatch Container Insights for a\n\t\t\tcluster.

\n

Container Insights with enhanced observability provides all the Container Insights\n\t\t\tmetrics, plus additional task and container metrics. This version supports enhanced\n\t\t\tobservability for Amazon ECS clusters using the Amazon EC2 and Fargate launch types. After you\n\t\t\tconfigure Container Insights with enhanced observability on Amazon ECS, Container Insights\n\t\t\tauto-collects detailed infrastructure telemetry from the cluster level down to the\n\t\t\tcontainer level in your environment and displays these critical performance data in\n\t\t\tcurated dashboards removing the heavy lifting in observability set-up.

\n

For more information, see Monitor\n\t\t\t\tAmazon ECS containers using Container Insights with enhanced observability in the\n\t\t\tAmazon Elastic Container Service Developer Guide.

" } }, "com.amazonaws.ecs#ClusterSettingName": { @@ -2128,7 +2128,7 @@ "resourceIds": { "target": "com.amazonaws.ecs#ResourceIds", "traits": { - "smithy.api#documentation": "

The existing task ARNs which are already associated with the clientToken.

" + "smithy.api#documentation": "

The existing task ARNs which are already associated with the\n\t\t\t\tclientToken.

" } }, "message": { @@ -2139,7 +2139,7 @@ } }, "traits": { - "smithy.api#documentation": "

The RunTask request could not be processed due to conflicts. The provided\n\t\t\t\tclientToken is already in use with a different RunTask request. The\n\t\t\t\tresourceIds are the existing task ARNs which are already associated with the\n\t\t\t\tclientToken.

\n

To fix this issue:

\n
    \n
  • \n

    Run RunTask with a unique clientToken.

    \n
  • \n
  • \n

    Run RunTask with the clientToken and the original set of\n\t\t\t\t\tparameters

    \n
  • \n
", + "smithy.api#documentation": "

The RunTask request could not be processed due to conflicts. The provided\n\t\t\t\tclientToken is already in use with a different RunTask\n\t\t\trequest. The resourceIds are the existing task ARNs which are already\n\t\t\tassociated with the clientToken.

\n

To fix this issue:

\n
    \n
  • \n

    Run RunTask with a unique clientToken.

    \n
  • \n
  • \n

    Run RunTask with the clientToken and the original\n\t\t\t\t\tset of parameters

    \n
  • \n
", "smithy.api#error": "client" } }, @@ -2214,7 +2214,7 @@ "reason": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

A short (255 max characters) human-readable string to provide additional details about a running or\n\t\t\tstopped container.

" + "smithy.api#documentation": "

A short (255 max characters) human-readable string to provide additional details about\n\t\t\ta running or stopped container.

" } }, "networkBindings": { @@ -2232,7 +2232,7 @@ "healthStatus": { "target": "com.amazonaws.ecs#HealthStatus", "traits": { - "smithy.api#documentation": "

The health status of the container. If health checks aren't configured for this container in its task\n\t\t\tdefinition, then it reports the health status as UNKNOWN.

" + "smithy.api#documentation": "

The health status of the container. If health checks aren't configured for this\n\t\t\tcontainer in its task definition, then it reports the health status as\n\t\t\t\tUNKNOWN.

" } }, "managedAgents": { @@ -2244,7 +2244,7 @@ "cpu": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

The number of CPU units set for the container. The value is 0 if no value was specified\n\t\t\tin the container definition when the task definition was registered.

" + "smithy.api#documentation": "

The number of CPU units set for the container. The value is 0 if no value\n\t\t\twas specified in the container definition when the task definition was\n\t\t\tregistered.

" } }, "memory": { @@ -2305,13 +2305,13 @@ "name": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

The name of a container. If you're linking multiple containers together in a task definition, the\n\t\t\t\tname of one container can be entered in the links of another container to\n\t\t\tconnect the containers. Up to 255 letters (uppercase and lowercase), numbers, underscores, and hyphens are allowed. This parameter maps to name in the docker\n\t\t\tcontainer create command and the --name option to docker run.

" + "smithy.api#documentation": "

The name of a container. If you're linking multiple containers together in a task\n\t\t\tdefinition, the name of one container can be entered in the\n\t\t\t\tlinks of another container to connect the containers.\n\t\t\tUp to 255 letters (uppercase and lowercase), numbers, underscores, and hyphens are allowed. This parameter maps to name in the docker container\n\t\t\tcreate command and the --name option to docker run.

" } }, "image": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

The image used to start a container. This string is passed directly to the Docker daemon. By default,\n\t\t\timages in the Docker Hub registry are available. Other repositories are specified with either \n repository-url/image:tag\n or \n repository-url/image@digest\n . Up to 255 letters (uppercase and lowercase), numbers, hyphens, underscores, colons, periods, forward slashes, and number signs are allowed. This parameter maps to Image in the docker container create\n\t\t\tcommand and the IMAGE parameter of docker run.

\n
    \n
  • \n

    When a new task starts, the Amazon ECS container agent pulls the latest version of the specified\n\t\t\t\t\timage and tag for the container to use. However, subsequent updates to a repository image\n\t\t\t\t\taren't propagated to already running tasks.

    \n
  • \n
  • \n

    Images in Amazon ECR repositories can be specified by either using the full\n\t\t\t\t\t\tregistry/repository:tag or registry/repository@digest. For\n\t\t\t\t\texample,\n\t\t\t\t\t\t012345678910.dkr.ecr..amazonaws.com/:latest\n\t\t\t\t\tor\n\t\t\t\t\t\t012345678910.dkr.ecr..amazonaws.com/@sha256:94afd1f2e64d908bc90dbca0035a5b567EXAMPLE.\n\t\t\t\t

    \n
  • \n
  • \n

    Images in official repositories on Docker Hub use a single name (for example,\n\t\t\t\t\t\tubuntu or mongo).

    \n
  • \n
  • \n

    Images in other repositories on Docker Hub are qualified with an organization name (for\n\t\t\t\t\texample, amazon/amazon-ecs-agent).

    \n
  • \n
  • \n

    Images in other online repositories are qualified further by a domain name (for example,\n\t\t\t\t\t\tquay.io/assemblyline/ubuntu).

    \n
  • \n
" + "smithy.api#documentation": "

The image used to start a container. This string is passed directly to the Docker\n\t\t\tdaemon. By default, images in the Docker Hub registry are available. Other repositories\n\t\t\tare specified with either \n repository-url/image:tag\n or \n repository-url/image@digest\n . Up to 255 letters (uppercase and lowercase), numbers, hyphens, underscores, colons, periods, forward slashes, and number signs are allowed. This parameter maps to Image in the docker\n\t\t\tcontainer create command and the IMAGE parameter of docker run.

\n
    \n
  • \n

    When a new task starts, the Amazon ECS container agent pulls the latest version of\n\t\t\t\t\tthe specified image and tag for the container to use. However, subsequent\n\t\t\t\t\tupdates to a repository image aren't propagated to already running tasks.

    \n
  • \n
  • \n

    Images in Amazon ECR repositories can be specified by either using the full\n\t\t\t\t\t\tregistry/repository:tag or\n\t\t\t\t\t\tregistry/repository@digest. For example,\n\t\t\t\t\t\t012345678910.dkr.ecr..amazonaws.com/:latest\n\t\t\t\t\tor\n\t\t\t\t\t\t012345678910.dkr.ecr..amazonaws.com/@sha256:94afd1f2e64d908bc90dbca0035a5b567EXAMPLE.\n\t\t\t\t

    \n
  • \n
  • \n

    Images in official repositories on Docker Hub use a single name (for example,\n\t\t\t\t\t\tubuntu or mongo).

    \n
  • \n
  • \n

    Images in other repositories on Docker Hub are qualified with an organization\n\t\t\t\t\tname (for example, amazon/amazon-ecs-agent).

    \n
  • \n
  • \n

    Images in other online repositories are qualified further by a domain name\n\t\t\t\t\t(for example, quay.io/assemblyline/ubuntu).

    \n
  • \n
" } }, "repositoryCredentials": { @@ -2324,240 +2324,240 @@ "target": "com.amazonaws.ecs#Integer", "traits": { "smithy.api#default": 0, - "smithy.api#documentation": "

The number of cpu units reserved for the container. This parameter maps to\n\t\t\t\tCpuShares in the docker container create commandand the --cpu-shares\n\t\t\toption to docker run.

\n

This field is optional for tasks using the Fargate launch type, and the only\n\t\t\trequirement is that the total amount of CPU reserved for all containers within a task be lower than the\n\t\t\ttask-level cpu value.

\n \n

You can determine the number of CPU units that are available per EC2 instance type by multiplying\n\t\t\t\tthe vCPUs listed for that instance type on the Amazon EC2 Instances detail page by 1,024.

\n
\n

Linux containers share unallocated CPU units with other containers on the container instance with the\n\t\t\tsame ratio as their allocated amount. For example, if you run a single-container task on a single-core\n\t\t\tinstance type with 512 CPU units specified for that container, and that's the only task running on the\n\t\t\tcontainer instance, that container could use the full 1,024 CPU unit share at any given time. However,\n\t\t\tif you launched another copy of the same task on that container instance, each task is guaranteed a\n\t\t\tminimum of 512 CPU units when needed. Moreover, each container could float to higher CPU usage if the\n\t\t\tother container was not using it. If both tasks were 100% active all of the time, they would be limited\n\t\t\tto 512 CPU units.

\n

On Linux container instances, the Docker daemon on the container instance uses the CPU value to\n\t\t\tcalculate the relative CPU share ratios for running containers. The minimum valid CPU share value that\n\t\t\tthe Linux kernel allows is 2, and the maximum valid CPU share value that the Linux kernel allows is\n\t\t\t262144. However, the CPU parameter isn't required, and you can use CPU values below 2 or above 262144\n\t\t\tin your container definitions. For CPU values below 2 (including null) or above 262144, the behavior\n\t\t\tvaries based on your Amazon ECS container agent version:

\n
    \n
  • \n

    \n Agent versions less than or equal to 1.1.0: Null and zero CPU\n\t\t\t\t\tvalues are passed to Docker as 0, which Docker then converts to 1,024 CPU shares. CPU values of\n\t\t\t\t\t1 are passed to Docker as 1, which the Linux kernel converts to two CPU shares.

    \n
  • \n
  • \n

    \n Agent versions greater than or equal to 1.2.0: Null, zero, and\n\t\t\t\t\tCPU values of 1 are passed to Docker as 2.

    \n
  • \n
  • \n

    \n Agent versions greater than or equal to 1.84.0: CPU values\n\t\t\t\t\tgreater than 256 vCPU are passed to Docker as 256, which is equivalent to 262144 CPU\n\t\t\t\t\tshares.

    \n
  • \n
\n

On Windows container instances, the CPU limit is enforced as an absolute limit, or a quota. Windows\n\t\t\tcontainers only have access to the specified amount of CPU that's described in the task definition. A\n\t\t\tnull or zero CPU value is passed to Docker as 0, which Windows interprets as 1% of one\n\t\t\tCPU.

" + "smithy.api#documentation": "

The number of cpu units reserved for the container. This parameter maps\n\t\t\tto CpuShares in the docker container create command and the\n\t\t\t\t--cpu-shares option to docker run.

\n

This field is optional for tasks using the Fargate launch type, and the\n\t\t\tonly requirement is that the total amount of CPU reserved for all containers within a\n\t\t\ttask be lower than the task-level cpu value.

\n \n

You can determine the number of CPU units that are available per EC2 instance type\n\t\t\t\tby multiplying the vCPUs listed for that instance type on the Amazon EC2 Instances detail page\n\t\t\t\tby 1,024.

\n
\n

Linux containers share unallocated CPU units with other containers on the container\n\t\t\tinstance with the same ratio as their allocated amount. For example, if you run a\n\t\t\tsingle-container task on a single-core instance type with 512 CPU units specified for\n\t\t\tthat container, and that's the only task running on the container instance, that\n\t\t\tcontainer could use the full 1,024 CPU unit share at any given time. However, if you\n\t\t\tlaunched another copy of the same task on that container instance, each task is\n\t\t\tguaranteed a minimum of 512 CPU units when needed. Moreover, each container could float\n\t\t\tto higher CPU usage if the other container was not using it. If both tasks were 100%\n\t\t\tactive all of the time, they would be limited to 512 CPU units.

\n

On Linux container instances, the Docker daemon on the container instance uses the CPU\n\t\t\tvalue to calculate the relative CPU share ratios for running containers. The minimum\n\t\t\tvalid CPU share value that the Linux kernel allows is 2, and the maximum valid CPU share\n\t\t\tvalue that the Linux kernel allows is 262144. However, the CPU parameter isn't required,\n\t\t\tand you can use CPU values below 2 or above 262144 in your container definitions. For\n\t\t\tCPU values below 2 (including null) or above 262144, the behavior varies based on your\n\t\t\tAmazon ECS container agent version:

\n
    \n
  • \n

    \n Agent versions less than or equal to 1.1.0:\n\t\t\t\t\tNull and zero CPU values are passed to Docker as 0, which Docker then converts\n\t\t\t\t\tto 1,024 CPU shares. CPU values of 1 are passed to Docker as 1, which the Linux\n\t\t\t\t\tkernel converts to two CPU shares.

    \n
  • \n
  • \n

    \n Agent versions greater than or equal to 1.2.0:\n\t\t\t\t\tNull, zero, and CPU values of 1 are passed to Docker as 2.

    \n
  • \n
  • \n

    \n Agent versions greater than or equal to\n\t\t\t\t\t\t1.84.0: CPU values greater than 256 vCPU are passed to Docker as\n\t\t\t\t\t256, which is equivalent to 262144 CPU shares.

    \n
  • \n
\n

On Windows container instances, the CPU limit is enforced as an absolute limit, or a\n\t\t\tquota. Windows containers only have access to the specified amount of CPU that's\n\t\t\tdescribed in the task definition. A null or zero CPU value is passed to Docker as\n\t\t\t\t0, which Windows interprets as 1% of one CPU.

" } }, "memory": { "target": "com.amazonaws.ecs#BoxedInteger", "traits": { - "smithy.api#documentation": "

The amount (in MiB) of memory to present to the container. If your container attempts to exceed the\n\t\t\tmemory specified here, the container is killed. The total amount of memory reserved for all containers\n\t\t\twithin a task must be lower than the task memory value, if one is specified. This\n\t\t\tparameter maps to Memory in the docker container create command and the\n\t\t\t\t--memory option to docker run.

\n

If using the Fargate launch type, this parameter is optional.

\n

If using the EC2 launch type, you must specify either a task-level memory value or a\n\t\t\tcontainer-level memory value. If you specify both a container-level memory and\n\t\t\t\tmemoryReservation value, memory must be greater than\n\t\t\t\tmemoryReservation. If you specify memoryReservation, then that value is\n\t\t\tsubtracted from the available memory resources for the container instance where the container is\n\t\t\tplaced. Otherwise, the value of memory is used.

\n

The Docker 20.10.0 or later daemon reserves a minimum of 6 MiB of memory for a container. So, don't\n\t\t\tspecify less than 6 MiB of memory for your containers.

\n

The Docker 19.03.13-ce or earlier daemon reserves a minimum of 4 MiB of memory for a container. So,\n\t\t\tdon't specify less than 4 MiB of memory for your containers.

" + "smithy.api#documentation": "

The amount (in MiB) of memory to present to the container. If your container attempts\n\t\t\tto exceed the memory specified here, the container is killed. The total amount of memory\n\t\t\treserved for all containers within a task must be lower than the task\n\t\t\t\tmemory value, if one is specified. This parameter maps to\n\t\t\t\tMemory in the docker container create command and the\n\t\t\t\t--memory option to docker run.

\n

If using the Fargate launch type, this parameter is optional.

\n

If using the EC2 launch type, you must specify either a task-level\n\t\t\tmemory value or a container-level memory value. If you specify both a container-level\n\t\t\t\tmemory and memoryReservation value, memory\n\t\t\tmust be greater than memoryReservation. If you specify\n\t\t\t\tmemoryReservation, then that value is subtracted from the available\n\t\t\tmemory resources for the container instance where the container is placed. Otherwise,\n\t\t\tthe value of memory is used.

\n

The Docker 20.10.0 or later daemon reserves a minimum of 6 MiB of memory for a\n\t\t\tcontainer. So, don't specify less than 6 MiB of memory for your containers.

\n

The Docker 19.03.13-ce or earlier daemon reserves a minimum of 4 MiB of memory for a\n\t\t\tcontainer. So, don't specify less than 4 MiB of memory for your containers.

" } }, "memoryReservation": { "target": "com.amazonaws.ecs#BoxedInteger", "traits": { - "smithy.api#documentation": "

The soft limit (in MiB) of memory to reserve for the container. When system memory is under heavy\n\t\t\tcontention, Docker attempts to keep the container memory to this soft limit. However, your container\n\t\t\tcan consume more memory when it needs to, up to either the hard limit specified with the\n\t\t\t\tmemory parameter (if applicable), or all of the available memory on the container\n\t\t\tinstance, whichever comes first. This parameter maps to MemoryReservation in the docker\n\t\t\tcontainer create command and the --memory-reservation option to docker run.

\n

If a task-level memory value is not specified, you must specify a non-zero integer for one or both of\n\t\t\t\tmemory or memoryReservation in a container definition. If you specify\n\t\t\tboth, memory must be greater than memoryReservation. If you specify\n\t\t\t\tmemoryReservation, then that value is subtracted from the available memory resources\n\t\t\tfor the container instance where the container is placed. Otherwise, the value of memory\n\t\t\tis used.

\n

For example, if your container normally uses 128 MiB of memory, but occasionally bursts to 256 MiB of\n\t\t\tmemory for short periods of time, you can set a memoryReservation of 128 MiB, and a\n\t\t\t\tmemory hard limit of 300 MiB. This configuration would allow the container to only\n\t\t\treserve 128 MiB of memory from the remaining resources on the container instance, but also allow the\n\t\t\tcontainer to consume more memory resources when needed.

\n

The Docker 20.10.0 or later daemon reserves a minimum of 6 MiB of memory for a container. So, don't\n\t\t\tspecify less than 6 MiB of memory for your containers.

\n

The Docker 19.03.13-ce or earlier daemon reserves a minimum of 4 MiB of memory for a container. So,\n\t\t\tdon't specify less than 4 MiB of memory for your containers.

" + "smithy.api#documentation": "

The soft limit (in MiB) of memory to reserve for the container. When system memory is\n\t\t\tunder heavy contention, Docker attempts to keep the container memory to this soft limit.\n\t\t\tHowever, your container can consume more memory when it needs to, up to either the hard\n\t\t\tlimit specified with the memory parameter (if applicable), or all of the\n\t\t\tavailable memory on the container instance, whichever comes first. This parameter maps\n\t\t\tto MemoryReservation in the docker container create command and the\n\t\t\t\t--memory-reservation option to docker run.

\n

If a task-level memory value is not specified, you must specify a non-zero integer for\n\t\t\tone or both of memory or memoryReservation in a container\n\t\t\tdefinition. If you specify both, memory must be greater than\n\t\t\t\tmemoryReservation. If you specify memoryReservation, then\n\t\t\tthat value is subtracted from the available memory resources for the container instance\n\t\t\twhere the container is placed. Otherwise, the value of memory is\n\t\t\tused.

\n

For example, if your container normally uses 128 MiB of memory, but occasionally\n\t\t\tbursts to 256 MiB of memory for short periods of time, you can set a\n\t\t\t\tmemoryReservation of 128 MiB, and a memory hard limit of\n\t\t\t300 MiB. This configuration would allow the container to only reserve 128 MiB of memory\n\t\t\tfrom the remaining resources on the container instance, but also allow the container to\n\t\t\tconsume more memory resources when needed.

\n

The Docker 20.10.0 or later daemon reserves a minimum of 6 MiB of memory for a\n\t\t\tcontainer. So, don't specify less than 6 MiB of memory for your containers.

\n

The Docker 19.03.13-ce or earlier daemon reserves a minimum of 4 MiB of memory for a\n\t\t\tcontainer. So, don't specify less than 4 MiB of memory for your containers.

" } }, "links": { "target": "com.amazonaws.ecs#StringList", "traits": { - "smithy.api#documentation": "

The links parameter allows containers to communicate with each other without the need\n\t\t\tfor port mappings. This parameter is only supported if the network mode of a task definition is\n\t\t\t\tbridge. The name:internalName construct is analogous to\n\t\t\t\tname:alias in Docker links. Up to 255 letters (uppercase and lowercase), numbers, underscores, and hyphens are allowed.. This parameter maps to\n\t\t\t\tLinks in the docker container create command and the --link option to\n\t\t\tdocker run.

\n \n

This parameter is not supported for Windows containers.

\n
\n \n

Containers that are collocated on a single container instance may be able to communicate with\n\t\t\t\teach other without requiring links or host port mappings. Network isolation is achieved on the\n\t\t\t\tcontainer instance using security groups and VPC settings.

\n
" + "smithy.api#documentation": "

The links parameter allows containers to communicate with each other\n\t\t\twithout the need for port mappings. This parameter is only supported if the network mode\n\t\t\tof a task definition is bridge. The name:internalName\n\t\t\tconstruct is analogous to name:alias in Docker links.\n\t\t\tUp to 255 letters (uppercase and lowercase), numbers, underscores, and hyphens are allowed. This parameter maps to Links in the docker\n\t\t\tcontainer create command and the --link option to docker run.

\n \n

This parameter is not supported for Windows containers.

\n
\n \n

Containers that are collocated on a single container instance may be able to\n\t\t\t\tcommunicate with each other without requiring links or host port mappings. Network\n\t\t\t\tisolation is achieved on the container instance using security groups and VPC\n\t\t\t\tsettings.

\n
" } }, "portMappings": { "target": "com.amazonaws.ecs#PortMappingList", "traits": { - "smithy.api#documentation": "

The list of port mappings for the container. Port mappings allow containers to access ports on the\n\t\t\thost container instance to send or receive traffic.

\n

For task definitions that use the awsvpc network mode, only specify the\n\t\t\t\tcontainerPort. The hostPort can be left blank or it must be the same\n\t\t\tvalue as the containerPort.

\n

Port mappings on Windows use the NetNAT gateway address rather than\n\t\t\t\tlocalhost. There's no loopback for port mappings on Windows, so you can't access a\n\t\t\tcontainer's mapped port from the host itself.

\n

This parameter maps to PortBindings in the the docker container create command and the\n\t\t\t\t--publish option to docker run. If the network mode of a task definition is set to\n\t\t\t\tnone, then you can't specify port mappings. If the network mode of a task definition\n\t\t\tis set to host, then host ports must either be undefined or they must match the container\n\t\t\tport in the port mapping.

\n \n

After a task reaches the RUNNING status, manual and automatic host and container\n\t\t\t\tport assignments are visible in the Network Bindings section of a\n\t\t\t\tcontainer description for a selected task in the Amazon ECS console. The assignments are also visible in\n\t\t\t\tthe networkBindings section DescribeTasks\n\t\t\t\tresponses.

\n
" + "smithy.api#documentation": "

The list of port mappings for the container. Port mappings allow containers to access\n\t\t\tports on the host container instance to send or receive traffic.

\n

For task definitions that use the awsvpc network mode, only specify the\n\t\t\t\tcontainerPort. The hostPort can be left blank or it must\n\t\t\tbe the same value as the containerPort.

\n

Port mappings on Windows use the NetNAT gateway address rather than\n\t\t\t\tlocalhost. There's no loopback for port mappings on Windows, so you\n\t\t\tcan't access a container's mapped port from the host itself.

\n

This parameter maps to PortBindings in the docker container create\n\t\t\tcommand and the --publish option to docker run. If the network mode of a\n\t\t\ttask definition is set to none, then you can't specify port mappings. If\n\t\t\tthe network mode of a task definition is set to host, then host ports must\n\t\t\teither be undefined or they must match the container port in the port mapping.

\n \n

After a task reaches the RUNNING status, manual and automatic host\n\t\t\t\tand container port assignments are visible in the Network\n\t\t\t\t\tBindings section of a container description for a selected task in\n\t\t\t\tthe Amazon ECS console. The assignments are also visible in the\n\t\t\t\t\tnetworkBindings section DescribeTasks\n\t\t\t\tresponses.

\n
" } }, "essential": { "target": "com.amazonaws.ecs#BoxedBoolean", "traits": { - "smithy.api#documentation": "

If the essential parameter of a container is marked as true, and that\n\t\t\tcontainer fails or stops for any reason, all other containers that are part of the task are stopped. If\n\t\t\tthe essential parameter of a container is marked as false, its failure\n\t\t\tdoesn't affect the rest of the containers in a task. If this parameter is omitted, a container is\n\t\t\tassumed to be essential.

\n

All tasks must have at least one essential container. If you have an application that's composed of\n\t\t\tmultiple containers, group containers that are used for a common purpose into components, and separate\n\t\t\tthe different components into multiple task definitions. For more information, see Application Architecture in the Amazon Elastic Container Service Developer Guide.

" + "smithy.api#documentation": "

If the essential parameter of a container is marked as true,\n\t\t\tand that container fails or stops for any reason, all other containers that are part of\n\t\t\tthe task are stopped. If the essential parameter of a container is marked\n\t\t\tas false, its failure doesn't affect the rest of the containers in a task.\n\t\t\tIf this parameter is omitted, a container is assumed to be essential.

\n

All tasks must have at least one essential container. If you have an application\n\t\t\tthat's composed of multiple containers, group containers that are used for a common\n\t\t\tpurpose into components, and separate the different components into multiple task\n\t\t\tdefinitions. For more information, see Application\n\t\t\t\tArchitecture in the Amazon Elastic Container Service Developer Guide.

" } }, "restartPolicy": { "target": "com.amazonaws.ecs#ContainerRestartPolicy", "traits": { - "smithy.api#documentation": "

The restart policy for a container. When you set up a restart policy, Amazon ECS can restart the container\n\t\t\twithout needing to replace the task. For more information, see Restart individual containers\n\t\t\t\tin Amazon ECS tasks with container restart policies in the Amazon Elastic Container Service Developer Guide.

" + "smithy.api#documentation": "

The restart policy for a container. When you set up a restart policy, Amazon ECS can\n\t\t\trestart the container without needing to replace the task. For more information, see\n\t\t\t\tRestart\n\t\t\t\tindividual containers in Amazon ECS tasks with container restart policies in the\n\t\t\tAmazon Elastic Container Service Developer Guide.

" } }, "entryPoint": { "target": "com.amazonaws.ecs#StringList", "traits": { - "smithy.api#documentation": "\n

Early versions of the Amazon ECS container agent don't properly handle entryPoint\n\t\t\t\tparameters. If you have problems using entryPoint, update your container agent or\n\t\t\t\tenter your commands and arguments as command array items instead.

\n
\n

The entry point that's passed to the container. This parameter maps to Entrypoint in the\n\t\t\tdocker container create command and the --entrypoint option to docker run.

" + "smithy.api#documentation": "\n

Early versions of the Amazon ECS container agent don't properly handle\n\t\t\t\t\tentryPoint parameters. If you have problems using\n\t\t\t\t\tentryPoint, update your container agent or enter your commands and\n\t\t\t\targuments as command array items instead.

\n
\n

The entry point that's passed to the container. This parameter maps to\n\t\t\t\tEntrypoint in the docker container create command and the\n\t\t\t\t--entrypoint option to docker run.

" } }, "command": { "target": "com.amazonaws.ecs#StringList", "traits": { - "smithy.api#documentation": "

The command that's passed to the container. This parameter maps to Cmd in the docker\n\t\t\tcontainer create command and the COMMAND parameter to docker run. If there are multiple\n\t\t\targuments, each argument is a separated string in the array.

" + "smithy.api#documentation": "

The command that's passed to the container. This parameter maps to Cmd in\n\t\t\tthe docker container create command and the COMMAND parameter to docker\n\t\t\trun. If there are multiple arguments, each argument is a separated string in the\n\t\t\tarray.

" } }, "environment": { "target": "com.amazonaws.ecs#EnvironmentVariables", "traits": { - "smithy.api#documentation": "

The environment variables to pass to a container. This parameter maps to Env in the\n\t\t\tdocker container create command and the --env option to docker run.

\n \n

We don't recommend that you use plaintext environment variables for sensitive information, such\n\t\t\t\tas credential data.

\n
" + "smithy.api#documentation": "

The environment variables to pass to a container. This parameter maps to\n\t\t\t\tEnv in the docker container create command and the --env\n\t\t\toption to docker run.

\n \n

We don't recommend that you use plaintext environment variables for sensitive\n\t\t\t\tinformation, such as credential data.

\n
" } }, "environmentFiles": { "target": "com.amazonaws.ecs#EnvironmentFiles", "traits": { - "smithy.api#documentation": "

A list of files containing the environment variables to pass to a container. This parameter maps to\n\t\t\tthe --env-file option to docker run.

\n

You can specify up to ten environment files. The file must have a .env file extension.\n\t\t\tEach line in an environment file contains an environment variable in VARIABLE=VALUE\n\t\t\tformat. Lines beginning with # are treated as comments and are ignored.

\n

If there are environment variables specified using the environment parameter in a\n\t\t\tcontainer definition, they take precedence over the variables contained within an environment file. If\n\t\t\tmultiple environment files are specified that contain the same variable, they're processed from the top\n\t\t\tdown. We recommend that you use unique variable names. For more information, see Specifying\n\t\t\t\tEnvironment Variables in the Amazon Elastic Container Service Developer Guide.

" + "smithy.api#documentation": "

A list of files containing the environment variables to pass to a container. This\n\t\t\tparameter maps to the --env-file option to docker run.

\n

You can specify up to ten environment files. The file must have a .env\n\t\t\tfile extension. Each line in an environment file contains an environment variable in\n\t\t\t\tVARIABLE=VALUE format. Lines beginning with # are treated\n\t\t\tas comments and are ignored.

\n

If there are environment variables specified using the environment\n\t\t\tparameter in a container definition, they take precedence over the variables contained\n\t\t\twithin an environment file. If multiple environment files are specified that contain the\n\t\t\tsame variable, they're processed from the top down. We recommend that you use unique\n\t\t\tvariable names. For more information, see Specifying Environment\n\t\t\t\tVariables in the Amazon Elastic Container Service Developer Guide.

" } }, "mountPoints": { "target": "com.amazonaws.ecs#MountPointList", "traits": { - "smithy.api#documentation": "

The mount points for data volumes in your container.

\n

This parameter maps to Volumes in the docker container create command and the\n\t\t\t\t--volume option to docker run.

\n

Windows containers can mount whole directories on the same drive as $env:ProgramData.\n\t\t\tWindows containers can't mount directories on a different drive, and mount point can't be across\n\t\t\tdrives.

" + "smithy.api#documentation": "

The mount points for data volumes in your container.

\n

This parameter maps to Volumes in the docker container create command and\n\t\t\tthe --volume option to docker run.

\n

Windows containers can mount whole directories on the same drive as\n\t\t\t\t$env:ProgramData. Windows containers can't mount directories on a\n\t\t\tdifferent drive, and mount point can't be across drives.

" } }, "volumesFrom": { "target": "com.amazonaws.ecs#VolumeFromList", "traits": { - "smithy.api#documentation": "

Data volumes to mount from another container. This parameter maps to VolumesFrom in the\n\t\t\tdocker container create command and the --volumes-from option to docker run.

" + "smithy.api#documentation": "

Data volumes to mount from another container. This parameter maps to\n\t\t\t\tVolumesFrom in the docker container create command and the\n\t\t\t\t--volumes-from option to docker run.

" } }, "linuxParameters": { "target": "com.amazonaws.ecs#LinuxParameters", "traits": { - "smithy.api#documentation": "

Linux-specific modifications that are applied to the container, such as Linux kernel capabilities.\n\t\t\tFor more information see KernelCapabilities.

\n \n

This parameter is not supported for Windows containers.

\n
" + "smithy.api#documentation": "

Linux-specific modifications that are applied to the container, such as Linux kernel\n\t\t\tcapabilities. For more information see KernelCapabilities.

\n \n

This parameter is not supported for Windows containers.

\n
" } }, "secrets": { "target": "com.amazonaws.ecs#SecretList", "traits": { - "smithy.api#documentation": "

The secrets to pass to the container. For more information, see Specifying Sensitive\n\t\t\t\tData in the Amazon Elastic Container Service Developer Guide.

" + "smithy.api#documentation": "

The secrets to pass to the container. For more information, see Specifying\n\t\t\t\tSensitive Data in the Amazon Elastic Container Service Developer Guide.

" } }, "dependsOn": { "target": "com.amazonaws.ecs#ContainerDependencies", "traits": { - "smithy.api#documentation": "

The dependencies defined for container startup and shutdown. A container can contain multiple\n\t\t\tdependencies on other containers in a task definition. When a dependency is defined for container\n\t\t\tstartup, for container shutdown it is reversed.

\n

For tasks using the EC2 launch type, the container instances require at least version\n\t\t\t1.26.0 of the container agent to turn on container dependencies. However, we recommend using the latest\n\t\t\tcontainer agent version. For information about checking your agent version and updating to the latest\n\t\t\tversion, see Updating the Amazon ECS Container Agent in the Amazon Elastic Container Service Developer Guide. If you're using an\n\t\t\tAmazon ECS-optimized Linux AMI, your instance needs at least version 1.26.0-1 of the ecs-init package. If\n\t\t\tyour container instances are launched from version 20190301 or later, then they contain\n\t\t\tthe required versions of the container agent and ecs-init. For more information, see\n\t\t\t\tAmazon ECS-optimized Linux AMI in the Amazon Elastic Container Service Developer Guide.

\n

For tasks using the Fargate launch type, the task or service requires the following\n\t\t\tplatforms:

\n
    \n
  • \n

    Linux platform version 1.3.0 or later.

    \n
  • \n
  • \n

    Windows platform version 1.0.0 or later.

    \n
  • \n
" + "smithy.api#documentation": "

The dependencies defined for container startup and shutdown. A container can contain\n\t\t\tmultiple dependencies on other containers in a task definition. When a dependency is\n\t\t\tdefined for container startup, for container shutdown it is reversed.

\n

For tasks using the EC2 launch type, the container instances require at\n\t\t\tleast version 1.26.0 of the container agent to turn on container dependencies. However,\n\t\t\twe recommend using the latest container agent version. For information about checking\n\t\t\tyour agent version and updating to the latest version, see Updating the Amazon ECS\n\t\t\t\tContainer Agent in the Amazon Elastic Container Service Developer Guide. If you're using an Amazon ECS-optimized Linux AMI,\n\t\t\tyour instance needs at least version 1.26.0-1 of the ecs-init package. If\n\t\t\tyour container instances are launched from version 20190301 or later, then\n\t\t\tthey contain the required versions of the container agent and ecs-init. For\n\t\t\tmore information, see Amazon ECS-optimized Linux AMI\n\t\t\tin the Amazon Elastic Container Service Developer Guide.

\n

For tasks using the Fargate launch type, the task or service requires\n\t\t\tthe following platforms:

\n
    \n
  • \n

    Linux platform version 1.3.0 or later.

    \n
  • \n
  • \n

    Windows platform version 1.0.0 or later.

    \n
  • \n
" } }, "startTimeout": { "target": "com.amazonaws.ecs#BoxedInteger", "traits": { - "smithy.api#documentation": "

Time duration (in seconds) to wait before giving up on resolving dependencies for a container. For\n\t\t\texample, you specify two containers in a task definition with containerA having a dependency on\n\t\t\tcontainerB reaching a COMPLETE, SUCCESS, or HEALTHY status. If a\n\t\t\t\tstartTimeout value is specified for containerB and it doesn't reach the desired status\n\t\t\twithin that time then containerA gives up and not start. This results in the task transitioning to a\n\t\t\t\tSTOPPED state.

\n \n

When the ECS_CONTAINER_START_TIMEOUT container agent configuration variable is used,\n\t\t\t\tit's enforced independently from this start timeout value.

\n
\n

For tasks using the Fargate launch type, the task or service requires the following\n\t\t\tplatforms:

\n
    \n
  • \n

    Linux platform version 1.3.0 or later.

    \n
  • \n
  • \n

    Windows platform version 1.0.0 or later.

    \n
  • \n
\n

For tasks using the EC2 launch type, your container instances require at least version\n\t\t\t\t1.26.0 of the container agent to use a container start timeout value. However, we\n\t\t\trecommend using the latest container agent version. For information about checking your agent version\n\t\t\tand updating to the latest version, see Updating the Amazon ECS Container\n\t\t\t\tAgent in the Amazon Elastic Container Service Developer Guide. If you're using an Amazon ECS-optimized Linux AMI, your instance needs at\n\t\t\tleast version 1.26.0-1 of the ecs-init package. If your container instances\n\t\t\tare launched from version 20190301 or later, then they contain the required versions of\n\t\t\tthe container agent and ecs-init. For more information, see Amazon ECS-optimized Linux AMI in the\n\t\t\tAmazon Elastic Container Service Developer Guide.

\n

The valid values for Fargate are 2-120 seconds.

" + "smithy.api#documentation": "

Time duration (in seconds) to wait before giving up on resolving dependencies for a\n\t\t\tcontainer. For example, you specify two containers in a task definition with containerA\n\t\t\thaving a dependency on containerB reaching a COMPLETE,\n\t\t\tSUCCESS, or HEALTHY status. If a startTimeout\n\t\t\tvalue is specified for containerB and it doesn't reach the desired status within that\n\t\t\ttime then containerA gives up and not start. This results in the task transitioning to a\n\t\t\t\tSTOPPED state.

\n \n

When the ECS_CONTAINER_START_TIMEOUT container agent configuration\n\t\t\t\tvariable is used, it's enforced independently from this start timeout value.

\n
\n

For tasks using the Fargate launch type, the task or service requires\n\t\t\tthe following platforms:

\n
    \n
  • \n

    Linux platform version 1.3.0 or later.

    \n
  • \n
  • \n

    Windows platform version 1.0.0 or later.

    \n
  • \n
\n

For tasks using the EC2 launch type, your container instances require at\n\t\t\tleast version 1.26.0 of the container agent to use a container start\n\t\t\ttimeout value. However, we recommend using the latest container agent version. For\n\t\t\tinformation about checking your agent version and updating to the latest version, see\n\t\t\t\tUpdating the Amazon ECS\n\t\t\t\tContainer Agent in the Amazon Elastic Container Service Developer Guide. If you're using an Amazon ECS-optimized Linux AMI,\n\t\t\tyour instance needs at least version 1.26.0-1 of the ecs-init\n\t\t\tpackage. If your container instances are launched from version 20190301 or\n\t\t\tlater, then they contain the required versions of the container agent and\n\t\t\t\tecs-init. For more information, see Amazon ECS-optimized Linux AMI\n\t\t\tin the Amazon Elastic Container Service Developer Guide.

\n

The valid values for Fargate are 2-120 seconds.

" } }, "stopTimeout": { "target": "com.amazonaws.ecs#BoxedInteger", "traits": { - "smithy.api#documentation": "

Time duration (in seconds) to wait before the container is forcefully killed if it doesn't exit\n\t\t\tnormally on its own.

\n

For tasks using the Fargate launch type, the task or service requires the following\n\t\t\tplatforms:

\n
    \n
  • \n

    Linux platform version 1.3.0 or later.

    \n
  • \n
  • \n

    Windows platform version 1.0.0 or later.

    \n
  • \n
\n

For tasks that use the Fargate launch type, the max stop timeout value is 120 seconds and if the\n\t\t\tparameter is not specified, the default value of 30 seconds is used.

\n

For tasks that use the EC2 launch type, if the stopTimeout parameter isn't\n\t\t\tspecified, the value set for the Amazon ECS container agent configuration variable\n\t\t\t\tECS_CONTAINER_STOP_TIMEOUT is used. If neither the stopTimeout parameter\n\t\t\tor the ECS_CONTAINER_STOP_TIMEOUT agent configuration variable are set, then the default\n\t\t\tvalues of 30 seconds for Linux containers and 30 seconds on Windows containers are used. Your container\n\t\t\tinstances require at least version 1.26.0 of the container agent to use a container stop timeout value.\n\t\t\tHowever, we recommend using the latest container agent version. For information about checking your\n\t\t\tagent version and updating to the latest version, see Updating the Amazon ECS Container\n\t\t\t\tAgent in the Amazon Elastic Container Service Developer Guide. If you're using an Amazon ECS-optimized Linux AMI, your instance needs at\n\t\t\tleast version 1.26.0-1 of the ecs-init package. If your container instances are launched\n\t\t\tfrom version 20190301 or later, then they contain the required versions of the container\n\t\t\tagent and ecs-init. For more information, see Amazon ECS-optimized Linux AMI in the\n\t\t\tAmazon Elastic Container Service Developer Guide.

\n

The valid values for Fargate are 2-120 seconds.

" + "smithy.api#documentation": "

Time duration (in seconds) to wait before the container is forcefully killed if it\n\t\t\tdoesn't exit normally on its own.

\n

For tasks using the Fargate launch type, the task or service requires\n\t\t\tthe following platforms:

\n
    \n
  • \n

    Linux platform version 1.3.0 or later.

    \n
  • \n
  • \n

    Windows platform version 1.0.0 or later.

    \n
  • \n
\n

For tasks that use the Fargate launch type, the max stop timeout value is 120\n\t\t\tseconds and if the parameter is not specified, the default value of 30 seconds is\n\t\t\tused.

\n

For tasks that use the EC2 launch type, if the stopTimeout\n\t\t\tparameter isn't specified, the value set for the Amazon ECS container agent configuration\n\t\t\tvariable ECS_CONTAINER_STOP_TIMEOUT is used. If neither the\n\t\t\t\tstopTimeout parameter or the ECS_CONTAINER_STOP_TIMEOUT\n\t\t\tagent configuration variable are set, then the default values of 30 seconds for Linux\n\t\t\tcontainers and 30 seconds on Windows containers are used. Your container instances\n\t\t\trequire at least version 1.26.0 of the container agent to use a container stop timeout\n\t\t\tvalue. However, we recommend using the latest container agent version. For information\n\t\t\tabout checking your agent version and updating to the latest version, see Updating the Amazon ECS Container Agent in the Amazon Elastic Container Service Developer Guide. If you're using\n\t\t\tan Amazon ECS-optimized Linux AMI, your instance needs at least version 1.26.0-1 of the\n\t\t\t\tecs-init package. If your container instances are launched from version\n\t\t\t\t20190301 or later, then they contain the required versions of the\n\t\t\tcontainer agent and ecs-init. For more information, see Amazon ECS-optimized Linux AMI in the Amazon Elastic Container Service Developer Guide.

\n

The valid values for Fargate are 2-120 seconds.

" } }, "versionConsistency": { "target": "com.amazonaws.ecs#VersionConsistency", "traits": { - "smithy.api#documentation": "

Specifies whether Amazon ECS will resolve the container image tag\n\t\t\tprovided in the container definition to an image digest. By default, the\n\t\t\tvalue is enabled. If you set the value for a container as\n\t\t\tdisabled, Amazon ECS will not resolve the provided container image tag\n\t\t\tto a digest and will use the original image URI specified in the container definition for deployment.\n\t\t\tFor more information about container image resolution, see Container image resolution in the Amazon ECS Developer Guide.

" + "smithy.api#documentation": "

Specifies whether Amazon ECS will resolve the container image tag provided in the container\n\t\t\tdefinition to an image digest. By default, the value is enabled. If you set\n\t\t\tthe value for a container as disabled, Amazon ECS will not resolve the provided\n\t\t\tcontainer image tag to a digest and will use the original image URI specified in the\n\t\t\tcontainer definition for deployment. For more information about container image\n\t\t\tresolution, see Container image resolution in the Amazon ECS Developer\n\t\t\t\tGuide.

" } }, "hostname": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

The hostname to use for your container. This parameter maps to Hostname in the docker\n\t\t\tcontainer create command and the --hostname option to docker run.

\n \n

The hostname parameter is not supported if you're using the awsvpc\n\t\t\t\tnetwork mode.

\n
" + "smithy.api#documentation": "

The hostname to use for your container. This parameter maps to Hostname\n\t\t\tin the docker container create command and the --hostname option to docker\n\t\t\trun.

\n \n

The hostname parameter is not supported if you're using the\n\t\t\t\t\tawsvpc network mode.

\n
" } }, "user": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

The user to use inside the container. This parameter maps to User in the docker\n\t\t\tcontainer create command and the --user option to docker run.

\n \n

When running tasks using the host network mode, don't run containers using the\n\t\t\t\troot user (UID 0). We recommend using a non-root user for better security.

\n
\n

You can specify the user using the following formats. If specifying a UID or GID, you\n\t\t\tmust specify it as a positive integer.

\n
    \n
  • \n

    \n user\n

    \n
  • \n
  • \n

    \n user:group\n

    \n
  • \n
  • \n

    \n uid\n

    \n
  • \n
  • \n

    \n uid:gid\n

    \n
  • \n
  • \n

    \n user:gid\n

    \n
  • \n
  • \n

    \n uid:group\n

    \n
  • \n
\n \n

This parameter is not supported for Windows containers.

\n
" + "smithy.api#documentation": "

The user to use inside the container. This parameter maps to User in the\n\t\t\tdocker container create command and the --user option to docker run.

\n \n

When running tasks using the host network mode, don't run containers\n\t\t\t\tusing the root user (UID 0). We recommend using a non-root user for better\n\t\t\t\tsecurity.

\n
\n

You can specify the user using the following formats. If specifying a UID\n\t\t\tor GID, you must specify it as a positive integer.

\n
    \n
  • \n

    \n user\n

    \n
  • \n
  • \n

    \n user:group\n

    \n
  • \n
  • \n

    \n uid\n

    \n
  • \n
  • \n

    \n uid:gid\n

    \n
  • \n
  • \n

    \n user:gid\n

    \n
  • \n
  • \n

    \n uid:group\n

    \n
  • \n
\n \n

This parameter is not supported for Windows containers.

\n
" } }, "workingDirectory": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

The working directory to run commands inside the container in. This parameter maps to\n\t\t\t\tWorkingDir in the docker container create command and the --workdir\n\t\t\toption to docker run.

" + "smithy.api#documentation": "

The working directory to run commands inside the container in. This parameter maps to\n\t\t\t\tWorkingDir in the docker container create command and the\n\t\t\t\t--workdir option to docker run.

" } }, "disableNetworking": { "target": "com.amazonaws.ecs#BoxedBoolean", "traits": { - "smithy.api#documentation": "

When this parameter is true, networking is off within the container. This parameter maps to\n\t\t\t\tNetworkDisabled in the docker container create command.

\n \n

This parameter is not supported for Windows containers.

\n
" + "smithy.api#documentation": "

When this parameter is true, networking is off within the container. This parameter\n\t\t\tmaps to NetworkDisabled in the docker container create command.

\n \n

This parameter is not supported for Windows containers.

\n
" } }, "privileged": { "target": "com.amazonaws.ecs#BoxedBoolean", "traits": { - "smithy.api#documentation": "

When this parameter is true, the container is given elevated privileges on the host container\n\t\t\tinstance (similar to the root user). This parameter maps to Privileged in the\n\t\t\tdocker container create command and the --privileged option to docker run

\n \n

This parameter is not supported for Windows containers or tasks run on Fargate.

\n
" + "smithy.api#documentation": "

When this parameter is true, the container is given elevated privileges on the host\n\t\t\tcontainer instance (similar to the root user). This parameter maps to\n\t\t\t\tPrivileged in the docker container create command and the\n\t\t\t\t--privileged option to docker run

\n \n

This parameter is not supported for Windows containers or tasks run on Fargate.

\n
" } }, "readonlyRootFilesystem": { "target": "com.amazonaws.ecs#BoxedBoolean", "traits": { - "smithy.api#documentation": "

When this parameter is true, the container is given read-only access to its root file system. This\n\t\t\tparameter maps to ReadonlyRootfs in the docker container create command and the\n\t\t\t\t--read-only option to docker run.

\n \n

This parameter is not supported for Windows containers.

\n
" + "smithy.api#documentation": "

When this parameter is true, the container is given read-only access to its root file\n\t\t\tsystem. This parameter maps to ReadonlyRootfs in the docker container\n\t\t\tcreate command and the --read-only option to docker run.

\n \n

This parameter is not supported for Windows containers.

\n
" } }, "dnsServers": { "target": "com.amazonaws.ecs#StringList", "traits": { - "smithy.api#documentation": "

A list of DNS servers that are presented to the container. This parameter maps to Dns in\n\t\t\tthe docker container create command and the --dns option to docker run.

\n \n

This parameter is not supported for Windows containers.

\n
" + "smithy.api#documentation": "

A list of DNS servers that are presented to the container. This parameter maps to\n\t\t\t\tDns in the docker container create command and the --dns\n\t\t\toption to docker run.

\n \n

This parameter is not supported for Windows containers.

\n
" } }, "dnsSearchDomains": { "target": "com.amazonaws.ecs#StringList", "traits": { - "smithy.api#documentation": "

A list of DNS search domains that are presented to the container. This parameter maps to\n\t\t\t\tDnsSearch in the docker container create command and the --dns-search\n\t\t\toption to docker run.

\n \n

This parameter is not supported for Windows containers.

\n
" + "smithy.api#documentation": "

A list of DNS search domains that are presented to the container. This parameter maps\n\t\t\tto DnsSearch in the docker container create command and the\n\t\t\t\t--dns-search option to docker run.

\n \n

This parameter is not supported for Windows containers.

\n
" } }, "extraHosts": { "target": "com.amazonaws.ecs#HostEntryList", "traits": { - "smithy.api#documentation": "

A list of hostnames and IP address mappings to append to the /etc/hosts file on the\n\t\t\tcontainer. This parameter maps to ExtraHosts in the docker container create command and\n\t\t\tthe --add-host option to docker run.

\n \n

This parameter isn't supported for Windows containers or tasks that use the awsvpc\n\t\t\t\tnetwork mode.

\n
" + "smithy.api#documentation": "

A list of hostnames and IP address mappings to append to the /etc/hosts\n\t\t\tfile on the container. This parameter maps to ExtraHosts in the docker\n\t\t\tcontainer create command and the --add-host option to docker run.

\n \n

This parameter isn't supported for Windows containers or tasks that use the\n\t\t\t\t\tawsvpc network mode.

\n
" } }, "dockerSecurityOptions": { "target": "com.amazonaws.ecs#StringList", "traits": { - "smithy.api#documentation": "

A list of strings to provide custom configuration for multiple security systems. This field isn't\n\t\t\tvalid for containers in tasks using the Fargate launch type.

\n

For Linux tasks on EC2, this parameter can be used to reference custom labels for\n\t\t\tSELinux and AppArmor multi-level security systems.

\n

For any tasks on EC2, this parameter can be used to reference a credential spec file\n\t\t\tthat configures a container for Active Directory authentication. For more information, see Using gMSAs for\n\t\t\t\tWindows Containers and Using gMSAs for Linux Containers in\n\t\t\tthe Amazon Elastic Container Service Developer Guide.

\n

This parameter maps to SecurityOpt in the docker container create command and the\n\t\t\t\t--security-opt option to docker run.

\n \n

The Amazon ECS container agent running on a container instance must register with the\n\t\t\t\t\tECS_SELINUX_CAPABLE=true or ECS_APPARMOR_CAPABLE=true environment\n\t\t\t\tvariables before containers placed on that instance can use these security options. For more\n\t\t\t\tinformation, see Amazon ECS Container Agent\n\t\t\t\t\tConfiguration in the Amazon Elastic Container Service Developer Guide.

\n
\n

Valid values: \"no-new-privileges\" | \"apparmor:PROFILE\" | \"label:value\" |\n\t\t\t\"credentialspec:CredentialSpecFilePath\"

" + "smithy.api#documentation": "

A list of strings to provide custom configuration for multiple security systems. This\n\t\t\tfield isn't valid for containers in tasks using the Fargate launch\n\t\t\ttype.

\n

For Linux tasks on EC2, this parameter can be used to reference custom\n\t\t\tlabels for SELinux and AppArmor multi-level security systems.

\n

For any tasks on EC2, this parameter can be used to reference a\n\t\t\tcredential spec file that configures a container for Active Directory authentication.\n\t\t\tFor more information, see Using gMSAs for Windows\n\t\t\t\tContainers and Using gMSAs for Linux\n\t\t\t\tContainers in the Amazon Elastic Container Service Developer Guide.

\n

This parameter maps to SecurityOpt in the docker container create command\n\t\t\tand the --security-opt option to docker run.

\n \n

The Amazon ECS container agent running on a container instance must register with the\n\t\t\t\t\tECS_SELINUX_CAPABLE=true or ECS_APPARMOR_CAPABLE=true\n\t\t\t\tenvironment variables before containers placed on that instance can use these\n\t\t\t\tsecurity options. For more information, see Amazon ECS Container\n\t\t\t\t\tAgent Configuration in the Amazon Elastic Container Service Developer Guide.

\n
\n

Valid values: \"no-new-privileges\" | \"apparmor:PROFILE\" | \"label:value\" |\n\t\t\t\"credentialspec:CredentialSpecFilePath\"

" } }, "interactive": { "target": "com.amazonaws.ecs#BoxedBoolean", "traits": { - "smithy.api#documentation": "

When this parameter is true, you can deploy containerized applications that require\n\t\t\t\tstdin or a tty to be allocated. This parameter maps to\n\t\t\t\tOpenStdin in the docker container create command and the --interactive\n\t\t\toption to docker run.

" + "smithy.api#documentation": "

When this parameter is true, you can deploy containerized applications\n\t\t\tthat require stdin or a tty to be allocated. This parameter\n\t\t\tmaps to OpenStdin in the docker container create command and the\n\t\t\t\t--interactive option to docker run.

" } }, "pseudoTerminal": { "target": "com.amazonaws.ecs#BoxedBoolean", "traits": { - "smithy.api#documentation": "

When this parameter is true, a TTY is allocated. This parameter maps to Tty\n\t\t\tin the docker container create command and the --tty option to docker run.

" + "smithy.api#documentation": "

When this parameter is true, a TTY is allocated. This parameter maps to\n\t\t\t\tTty in the docker container create command and the --tty\n\t\t\toption to docker run.

" } }, "dockerLabels": { "target": "com.amazonaws.ecs#DockerLabelsMap", "traits": { - "smithy.api#documentation": "

A key/value map of labels to add to the container. This parameter maps to Labels in the\n\t\t\tdocker container create command and the --label option to docker run.\n\t\t\tThis parameter requires version 1.18 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: sudo docker version --format '{{.Server.APIVersion}}'\n

" + "smithy.api#documentation": "

A key/value map of labels to add to the container. This parameter maps to\n\t\t\t\tLabels in the docker container create command and the\n\t\t\t\t--label option to docker run. This parameter requires version 1.18 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: sudo docker version --format '{{.Server.APIVersion}}'\n

" } }, "ulimits": { "target": "com.amazonaws.ecs#UlimitList", "traits": { - "smithy.api#documentation": "

A list of ulimits to set in the container. If a ulimit value is specified\n\t\t\tin a task definition, it overrides the default values set by Docker. This parameter maps to\n\t\t\t\tUlimits in the docker container create command and the --ulimit option to\n\t\t\tdocker run. Valid naming values are displayed in the Ulimit data type.

\n

Amazon ECS tasks hosted on Fargate use the default\n\t\t\t\t\t\t\tresource limit values set by the operating system with the exception of\n\t\t\t\t\t\t\tthe nofile resource limit parameter which Fargate\n\t\t\t\t\t\t\toverrides. The nofile resource limit sets a restriction on\n\t\t\t\t\t\t\tthe number of open files that a container can use. The default\n\t\t\t\t\t\t\t\tnofile soft limit is 65535 and the default hard limit\n\t\t\t\t\t\t\tis 65535.

\n

This parameter requires version 1.18 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: sudo docker version --format '{{.Server.APIVersion}}'\n

\n \n

This parameter is not supported for Windows containers.

\n
" + "smithy.api#documentation": "

A list of ulimits to set in the container. If a ulimit value\n\t\t\tis specified in a task definition, it overrides the default values set by Docker. This\n\t\t\tparameter maps to Ulimits in the docker container create command and the\n\t\t\t\t--ulimit option to docker run. Valid naming values are displayed in the\n\t\t\t\tUlimit data type.

\n

Amazon ECS tasks hosted on Fargate use the default\n\t\t\t\t\t\t\tresource limit values set by the operating system with the exception of\n\t\t\t\t\t\t\tthe nofile resource limit parameter which Fargate\n\t\t\t\t\t\t\toverrides. The nofile resource limit sets a restriction on\n\t\t\t\t\t\t\tthe number of open files that a container can use. The default\n\t\t\t\t\t\t\t\tnofile soft limit is 65535 and the default hard limit\n\t\t\t\t\t\t\tis 65535.

\n

This parameter requires version 1.18 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: sudo docker version --format '{{.Server.APIVersion}}'\n

\n \n

This parameter is not supported for Windows containers.

\n
" } }, "logConfiguration": { "target": "com.amazonaws.ecs#LogConfiguration", "traits": { - "smithy.api#documentation": "

The log configuration specification for the container.

\n

This parameter maps to LogConfig in the docker container create command and the\n\t\t\t\t--log-driver option to docker run. By default, containers use the same logging driver\n\t\t\tthat the Docker daemon uses. However the container can use a different logging driver than the Docker\n\t\t\tdaemon by specifying a log driver with this parameter in the container definition. To use a different\n\t\t\tlogging driver for a container, the log system must be configured properly on the container instance\n\t\t\t(or on a different log server for remote logging options).

\n \n

Amazon ECS currently supports a subset of the logging drivers available to the Docker daemon (shown in\n\t\t\t\tthe LogConfiguration data type). Additional log drivers may be available in future\n\t\t\t\treleases of the Amazon ECS container agent.

\n
\n

This parameter requires version 1.18 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: sudo docker version --format '{{.Server.APIVersion}}'\n

\n \n

The Amazon ECS container agent running on a container instance must register the logging drivers\n\t\t\t\tavailable on that instance with the ECS_AVAILABLE_LOGGING_DRIVERS environment variable\n\t\t\t\tbefore containers placed on that instance can use these log configuration options. For more\n\t\t\t\tinformation, see Amazon ECS Container Agent\n\t\t\t\t\tConfiguration in the Amazon Elastic Container Service Developer Guide.

\n
" + "smithy.api#documentation": "

The log configuration specification for the container.

\n

This parameter maps to LogConfig in the docker container create command\n\t\t\tand the --log-driver option to docker run. By default, containers use the\n\t\t\tsame logging driver that the Docker daemon uses. However the container can use a\n\t\t\tdifferent logging driver than the Docker daemon by specifying a log driver with this\n\t\t\tparameter in the container definition. To use a different logging driver for a\n\t\t\tcontainer, the log system must be configured properly on the container instance (or on a\n\t\t\tdifferent log server for remote logging options).

\n \n

Amazon ECS currently supports a subset of the logging drivers available to the Docker\n\t\t\t\tdaemon (shown in the LogConfiguration data type). Additional log drivers may be available in\n\t\t\t\tfuture releases of the Amazon ECS container agent.

\n
\n

This parameter requires version 1.18 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: sudo docker version --format '{{.Server.APIVersion}}'\n

\n \n

The Amazon ECS container agent running on a container instance must register the\n\t\t\t\tlogging drivers available on that instance with the\n\t\t\t\t\tECS_AVAILABLE_LOGGING_DRIVERS environment variable before\n\t\t\t\tcontainers placed on that instance can use these log configuration options. For more\n\t\t\t\tinformation, see Amazon ECS Container\n\t\t\t\t\tAgent Configuration in the Amazon Elastic Container Service Developer Guide.

\n
" } }, "healthCheck": { "target": "com.amazonaws.ecs#HealthCheck", "traits": { - "smithy.api#documentation": "

The container health check command and associated configuration parameters for the container. This\n\t\t\tparameter maps to HealthCheck in the docker container create command and the\n\t\t\t\tHEALTHCHECK parameter of docker run.

" + "smithy.api#documentation": "

The container health check command and associated configuration parameters for the\n\t\t\tcontainer. This parameter maps to HealthCheck in the docker container\n\t\t\tcreate command and the HEALTHCHECK parameter of docker run.

" } }, "systemControls": { "target": "com.amazonaws.ecs#SystemControls", "traits": { - "smithy.api#documentation": "

A list of namespaced kernel parameters to set in the container. This parameter maps to\n\t\t\t\tSysctls in the docker container create command and the --sysctl option to\n\t\t\tdocker run. For example, you can configure net.ipv4.tcp_keepalive_time setting to maintain\n\t\t\tlonger lived connections.

" + "smithy.api#documentation": "

A list of namespaced kernel parameters to set in the container. This parameter maps to\n\t\t\t\tSysctls in the docker container create command and the\n\t\t\t\t--sysctl option to docker run. For example, you can configure\n\t\t\t\tnet.ipv4.tcp_keepalive_time setting to maintain longer lived\n\t\t\tconnections.

" } }, "resourceRequirements": { "target": "com.amazonaws.ecs#ResourceRequirements", "traits": { - "smithy.api#documentation": "

The type and amount of a resource to assign to a container. The only supported resource is a\n\t\t\tGPU.

" + "smithy.api#documentation": "

The type and amount of a resource to assign to a container. The only supported\n\t\t\tresource is a GPU.

" } }, "firelensConfiguration": { "target": "com.amazonaws.ecs#FirelensConfiguration", "traits": { - "smithy.api#documentation": "

The FireLens configuration for the container. This is used to specify and configure a log router for\n\t\t\tcontainer logs. For more information, see Custom Log Routing in the\n\t\t\tAmazon Elastic Container Service Developer Guide.

" + "smithy.api#documentation": "

The FireLens configuration for the container. This is used to specify and configure a\n\t\t\tlog router for container logs. For more information, see Custom Log Routing\n\t\t\tin the Amazon Elastic Container Service Developer Guide.

" } }, "credentialSpecs": { "target": "com.amazonaws.ecs#StringList", "traits": { - "smithy.api#documentation": "

A list of ARNs in SSM or Amazon S3 to a credential spec (CredSpec) file that configures the\n\t\t\tcontainer for Active Directory authentication. We recommend that you use this parameter instead of the\n\t\t\t\tdockerSecurityOptions. The maximum number of ARNs is 1.

\n

There are two formats for each ARN.

\n
\n
credentialspecdomainless:MyARN
\n
\n

You use credentialspecdomainless:MyARN to provide a CredSpec\n\t\t\t\t\t\twith an additional section for a secret in Secrets Manager. You provide the login credentials to the\n\t\t\t\t\t\tdomain in the secret.

\n

Each task that runs on any container instance can join different domains.

\n

You can use this format without joining the container instance to a domain.

\n
\n
credentialspec:MyARN
\n
\n

You use credentialspec:MyARN to provide a CredSpec for a single\n\t\t\t\t\t\tdomain.

\n

You must join the container instance to the domain before you start any tasks that use\n\t\t\t\t\t\tthis task definition.

\n
\n
\n

In both formats, replace MyARN with the ARN in SSM or Amazon S3.

\n

If you provide a credentialspecdomainless:MyARN, the credspec must provide\n\t\t\ta ARN in Secrets Manager for a secret containing the username, password, and the domain to connect to. For better\n\t\t\tsecurity, the instance isn't joined to the domain for domainless authentication. Other applications on\n\t\t\tthe instance can't use the domainless credentials. You can use this parameter to run tasks on the same\n\t\t\tinstance, even it the tasks need to join different domains. For more information, see Using gMSAs for\n\t\t\t\tWindows Containers and Using gMSAs for Linux\n\t\t\tContainers.

" + "smithy.api#documentation": "

A list of ARNs in SSM or Amazon S3 to a credential spec (CredSpec) file that\n\t\t\tconfigures the container for Active Directory authentication. We recommend that you use\n\t\t\tthis parameter instead of the dockerSecurityOptions. The maximum number of\n\t\t\tARNs is 1.

\n

There are two formats for each ARN.

\n
\n
credentialspecdomainless:MyARN
\n
\n

You use credentialspecdomainless:MyARN to provide a\n\t\t\t\t\t\t\tCredSpec with an additional section for a secret in Secrets Manager.\n\t\t\t\t\t\tYou provide the login credentials to the domain in the secret.

\n

Each task that runs on any container instance can join different\n\t\t\t\t\t\tdomains.

\n

You can use this format without joining the container instance to a\n\t\t\t\t\t\tdomain.

\n
\n
credentialspec:MyARN
\n
\n

You use credentialspec:MyARN to provide a\n\t\t\t\t\t\t\tCredSpec for a single domain.

\n

You must join the container instance to the domain before you start any\n\t\t\t\t\t\ttasks that use this task definition.

\n
\n
\n

In both formats, replace MyARN with the ARN in SSM or Amazon S3.

\n

You use this parameter to run tasks If you provide a credentialspecdomainless:MyARN, the\n\t\t\t\tcredspec must provide an ARN in Secrets Manager for a secret containing the\n\t\t\tusername, password, and the domain to connect to. For better security, the instance\n\t\t\tisn't joined to the domain for domainless authentication. Other applications on the\n\t\t\tinstance can't use the domainless credentials. You can use this parameter to run tasks\n\t\t\ton the same instance, even if the tasks need to join different domains. For more\n\t\t\tinformation, see Using gMSAs for Windows\n\t\t\t\tContainers and Using gMSAs for Linux\n\t\t\t\tContainers.

" } } }, "traits": { - "smithy.api#documentation": "

Container definitions are used in task definitions to describe the different containers that are\n\t\t\tlaunched as part of a task.

" + "smithy.api#documentation": "

Container definitions are used in task definitions to describe the different\n\t\t\tcontainers that are launched as part of a task.

" } }, "com.amazonaws.ecs#ContainerDefinitions": { @@ -2585,13 +2585,13 @@ "condition": { "target": "com.amazonaws.ecs#ContainerCondition", "traits": { - "smithy.api#documentation": "

The dependency condition of the container. The following are the available conditions and their\n\t\t\tbehavior:

\n
    \n
  • \n

    \n START - This condition emulates the behavior of links and volumes today.\n\t\t\t\t\tIt validates that a dependent container is started before permitting other containers to\n\t\t\t\t\tstart.

    \n
  • \n
  • \n

    \n COMPLETE - This condition validates that a dependent container runs to\n\t\t\t\t\tcompletion (exits) before permitting other containers to start. This can be useful for\n\t\t\t\t\tnonessential containers that run a script and then exit. This condition can't be set on an\n\t\t\t\t\tessential container.

    \n
  • \n
  • \n

    \n SUCCESS - This condition is the same as COMPLETE, but it also\n\t\t\t\t\trequires that the container exits with a zero status. This condition can't be set\n\t\t\t\t\ton an essential container.

    \n
  • \n
  • \n

    \n HEALTHY - This condition validates that the dependent container passes its\n\t\t\t\t\tDocker health check before permitting other containers to start. This requires that the\n\t\t\t\t\tdependent container has health checks configured. This condition is confirmed only at task\n\t\t\t\t\tstartup.

    \n
  • \n
", + "smithy.api#documentation": "

The dependency condition of the container. The following are the available conditions\n\t\t\tand their behavior:

\n
    \n
  • \n

    \n START - This condition emulates the behavior of links and\n\t\t\t\t\tvolumes today. It validates that a dependent container is started before\n\t\t\t\t\tpermitting other containers to start.

    \n
  • \n
  • \n

    \n COMPLETE - This condition validates that a dependent\n\t\t\t\t\tcontainer runs to completion (exits) before permitting other containers to\n\t\t\t\t\tstart. This can be useful for nonessential containers that run a script and then\n\t\t\t\t\texit. This condition can't be set on an essential container.

    \n
  • \n
  • \n

    \n SUCCESS - This condition is the same as\n\t\t\t\t\t\tCOMPLETE, but it also requires that the container exits with a\n\t\t\t\t\t\tzero status. This condition can't be set on an essential\n\t\t\t\t\tcontainer.

    \n
  • \n
  • \n

    \n HEALTHY - This condition validates that the dependent\n\t\t\t\t\tcontainer passes its Docker health check before permitting other containers to\n\t\t\t\t\tstart. This requires that the dependent container has health checks configured.\n\t\t\t\t\tThis condition is confirmed only at task startup.

    \n
  • \n
", "smithy.api#required": {} } } }, "traits": { - "smithy.api#documentation": "

The dependencies defined for container startup and shutdown. A container can contain multiple\n\t\t\tdependencies. When a dependency is defined for container startup, for container shutdown it is\n\t\t\treversed.

\n

Your Amazon ECS container instances require at least version 1.26.0 of the container agent to use\n\t\t\tcontainer dependencies. However, we recommend using the latest container agent version. For information\n\t\t\tabout checking your agent version and updating to the latest version, see Updating the Amazon ECS Container\n\t\t\t\tAgent in the Amazon Elastic Container Service Developer Guide. If you're using an Amazon ECS-optimized Linux AMI, your instance needs at\n\t\t\tleast version 1.26.0-1 of the ecs-init package. If your container instances are launched\n\t\t\tfrom version 20190301 or later, then they contain the required versions of the container\n\t\t\tagent and ecs-init. For more information, see Amazon ECS-optimized Linux AMI in the\n\t\t\tAmazon Elastic Container Service Developer Guide.

\n \n

For tasks that use the Fargate launch type, the task or service requires the\n\t\t\t\tfollowing platforms:

\n
    \n
  • \n

    Linux platform version 1.3.0 or later.

    \n
  • \n
  • \n

    Windows platform version 1.0.0 or later.

    \n
  • \n
\n
\n

For more information about how to create a container dependency, see Container dependency in the Amazon Elastic Container Service Developer Guide.

" + "smithy.api#documentation": "

The dependencies defined for container startup and shutdown. A container can contain\n\t\t\tmultiple dependencies. When a dependency is defined for container startup, for container\n\t\t\tshutdown it is reversed.

\n

Your Amazon ECS container instances require at least version 1.26.0 of the container agent\n\t\t\tto use container dependencies. However, we recommend using the latest container agent\n\t\t\tversion. For information about checking your agent version and updating to the latest\n\t\t\tversion, see Updating the Amazon ECS\n\t\t\t\tContainer Agent in the Amazon Elastic Container Service Developer Guide. If you're using an Amazon ECS-optimized Linux AMI,\n\t\t\tyour instance needs at least version 1.26.0-1 of the ecs-init package. If\n\t\t\tyour container instances are launched from version 20190301 or later, then\n\t\t\tthey contain the required versions of the container agent and ecs-init. For\n\t\t\tmore information, see Amazon ECS-optimized Linux AMI\n\t\t\tin the Amazon Elastic Container Service Developer Guide.

\n \n

For tasks that use the Fargate launch type, the task or service\n\t\t\t\trequires the following platforms:

\n
    \n
  • \n

    Linux platform version 1.3.0 or later.

    \n
  • \n
  • \n

    Windows platform version 1.0.0 or later.

    \n
  • \n
\n
\n

For more information about how to create a container dependency, see Container dependency in the Amazon Elastic Container Service Developer Guide.

" } }, "com.amazonaws.ecs#ContainerImage": { @@ -2617,7 +2617,7 @@ } }, "traits": { - "smithy.api#documentation": "

The details about the container image a service revision uses.

\n

To ensure that all tasks in a service use the same container image, Amazon ECS resolves\n\t\t\tcontainer image names and any image tags specified in the task definition to container\n\t\t\timage digests.

\n

After the container image digest has been established, Amazon ECS uses the digest to start\n\t\t\tany other desired tasks, and for any future service and service revision updates. This\n\t\t\tleads to all tasks in a service always running identical container images, resulting in\n\t\t\tversion consistency for your software. For more information, see Container image resolution in the Amazon ECS Developer Guide.

" + "smithy.api#documentation": "

The details about the container image a service revision uses.

\n

To ensure that all tasks in a service use the same container image, Amazon ECS\n\t\t\tresolves container image names and any image tags specified in the task definition to\n\t\t\tcontainer image digests.

\n

After the container image digest has been established, Amazon ECS uses the digest to\n\t\t\tstart any other desired tasks, and for any future service and service revision updates.\n\t\t\tThis leads to all tasks in a service always running identical container images,\n\t\t\tresulting in version consistency for your software. For more information, see Container image resolution in the Amazon ECS Developer Guide.

" } }, "com.amazonaws.ecs#ContainerImages": { @@ -2632,13 +2632,13 @@ "containerInstanceArn": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the container instance. For more information about the ARN format, see Amazon Resource Name (ARN)\n\t\t\tin the Amazon ECS Developer Guide.

" + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the container instance. For more information about the ARN format,\n\t\t\tsee Amazon Resource Name (ARN) in the Amazon ECS Developer Guide.

" } }, "ec2InstanceId": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

The ID of the container instance. For Amazon EC2 instances, this value is the Amazon EC2 instance ID. For\n\t\t\texternal instances, this value is the Amazon Web Services Systems Manager managed instance ID.

" + "smithy.api#documentation": "

The ID of the container instance. For Amazon EC2 instances, this value is the Amazon EC2\n\t\t\tinstance ID. For external instances, this value is the Amazon Web Services Systems Manager managed instance ID.

" } }, "capacityProviderName": { @@ -2651,31 +2651,31 @@ "target": "com.amazonaws.ecs#Long", "traits": { "smithy.api#default": 0, - "smithy.api#documentation": "

The version counter for the container instance. Every time a container instance experiences a change\n\t\t\tthat triggers a CloudWatch event, the version counter is incremented. If you're replicating your Amazon ECS\n\t\t\tcontainer instance state with CloudWatch Events, you can compare the version of a container instance\n\t\t\treported by the Amazon ECS APIs with the version reported in CloudWatch Events for the container instance\n\t\t\t(inside the detail object) to verify that the version in your event stream is\n\t\t\tcurrent.

" + "smithy.api#documentation": "

The version counter for the container instance. Every time a container instance\n\t\t\texperiences a change that triggers a CloudWatch event, the version counter is\n\t\t\tincremented. If you're replicating your Amazon ECS container instance state with CloudWatch\n\t\t\tEvents, you can compare the version of a container instance reported by the Amazon ECS APIs\n\t\t\twith the version reported in CloudWatch Events for the container instance (inside the\n\t\t\t\tdetail object) to verify that the version in your event stream is\n\t\t\tcurrent.

" } }, "versionInfo": { "target": "com.amazonaws.ecs#VersionInfo", "traits": { - "smithy.api#documentation": "

The version information for the Amazon ECS container agent and Docker daemon running on the container\n\t\t\tinstance.

" + "smithy.api#documentation": "

The version information for the Amazon ECS container agent and Docker daemon running on the\n\t\t\tcontainer instance.

" } }, "remainingResources": { "target": "com.amazonaws.ecs#Resources", "traits": { - "smithy.api#documentation": "

For CPU and memory resource types, this parameter describes the remaining CPU and memory that wasn't\n\t\t\talready allocated to tasks and is therefore available for new tasks. For port resource types, this\n\t\t\tparameter describes the ports that were reserved by the Amazon ECS container agent (at instance registration\n\t\t\ttime) and any task containers that have reserved port mappings on the host (with the host\n\t\t\tor bridge network mode). Any port that's not specified here is available for new\n\t\t\ttasks.

" + "smithy.api#documentation": "

For CPU and memory resource types, this parameter describes the remaining CPU and\n\t\t\tmemory that wasn't already allocated to tasks and is therefore available for new tasks.\n\t\t\tFor port resource types, this parameter describes the ports that were reserved by the\n\t\t\tAmazon ECS container agent (at instance registration time) and any task containers that have\n\t\t\treserved port mappings on the host (with the host or bridge\n\t\t\tnetwork mode). Any port that's not specified here is available for new tasks.

" } }, "registeredResources": { "target": "com.amazonaws.ecs#Resources", "traits": { - "smithy.api#documentation": "

For CPU and memory resource types, this parameter describes the amount of each resource that was\n\t\t\tavailable on the container instance when the container agent registered it with Amazon ECS. This value\n\t\t\trepresents the total amount of CPU and memory that can be allocated on this container instance to\n\t\t\ttasks. For port resource types, this parameter describes the ports that were reserved by the Amazon ECS\n\t\t\tcontainer agent when it registered the container instance with Amazon ECS.

" + "smithy.api#documentation": "

For CPU and memory resource types, this parameter describes the amount of each\n\t\t\tresource that was available on the container instance when the container agent\n\t\t\tregistered it with Amazon ECS. This value represents the total amount of CPU and memory that\n\t\t\tcan be allocated on this container instance to tasks. For port resource types, this\n\t\t\tparameter describes the ports that were reserved by the Amazon ECS container agent when it\n\t\t\tregistered the container instance with Amazon ECS.

" } }, "status": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

The status of the container instance. The valid values are REGISTERING,\n\t\t\t\tREGISTRATION_FAILED, ACTIVE, INACTIVE,\n\t\t\t\tDEREGISTERING, or DRAINING.

\n

If your account has opted in to the awsvpcTrunking account setting, then any newly\n\t\t\tregistered container instance will transition to a REGISTERING status while the trunk\n\t\t\telastic network interface is provisioned for the instance. If the registration fails, the instance will\n\t\t\ttransition to a REGISTRATION_FAILED status. You can describe the container instance and\n\t\t\tsee the reason for failure in the statusReason parameter. Once the container instance is\n\t\t\tterminated, the instance transitions to a DEREGISTERING status while the trunk elastic\n\t\t\tnetwork interface is deprovisioned. The instance then transitions to an INACTIVE\n\t\t\tstatus.

\n

The ACTIVE status indicates that the container instance can accept tasks. The\n\t\t\t\tDRAINING indicates that new tasks aren't placed on the container instance and any\n\t\t\tservice tasks running on the container instance are removed if possible. For more information, see\n\t\t\t\tContainer instance draining in the Amazon Elastic Container Service Developer Guide.

" + "smithy.api#documentation": "

The status of the container instance. The valid values are REGISTERING,\n\t\t\t\tREGISTRATION_FAILED, ACTIVE, INACTIVE,\n\t\t\t\tDEREGISTERING, or DRAINING.

\n

If your account has opted in to the awsvpcTrunking account setting, then\n\t\t\tany newly registered container instance will transition to a REGISTERING\n\t\t\tstatus while the trunk elastic network interface is provisioned for the instance. If the\n\t\t\tregistration fails, the instance will transition to a REGISTRATION_FAILED\n\t\t\tstatus. You can describe the container instance and see the reason for failure in the\n\t\t\t\tstatusReason parameter. Once the container instance is terminated, the\n\t\t\tinstance transitions to a DEREGISTERING status while the trunk elastic\n\t\t\tnetwork interface is deprovisioned. The instance then transitions to an\n\t\t\t\tINACTIVE status.

\n

The ACTIVE status indicates that the container instance can accept tasks.\n\t\t\tThe DRAINING indicates that new tasks aren't placed on the container\n\t\t\tinstance and any service tasks running on the container instance are removed if\n\t\t\tpossible. For more information, see Container instance draining in the\n\t\t\tAmazon Elastic Container Service Developer Guide.

" } }, "statusReason": { @@ -2688,33 +2688,33 @@ "target": "com.amazonaws.ecs#Boolean", "traits": { "smithy.api#default": false, - "smithy.api#documentation": "

This parameter returns true if the agent is connected to Amazon ECS. An instance with an\n\t\t\tagent that may be unhealthy or stopped return false. Only instances connected to an agent\n\t\t\tcan accept task placement requests.

" + "smithy.api#documentation": "

This parameter returns true if the agent is connected to Amazon ECS. An\n\t\t\tinstance with an agent that may be unhealthy or stopped returns false. Only\n\t\t\tinstances connected to an agent can accept task placement requests.

" } }, "runningTasksCount": { "target": "com.amazonaws.ecs#Integer", "traits": { "smithy.api#default": 0, - "smithy.api#documentation": "

The number of tasks on the container instance that have a desired status (desiredStatus)\n\t\t\tof RUNNING.

" + "smithy.api#documentation": "

The number of tasks on the container instance that have a desired status\n\t\t\t\t(desiredStatus) of RUNNING.

" } }, "pendingTasksCount": { "target": "com.amazonaws.ecs#Integer", "traits": { "smithy.api#default": 0, - "smithy.api#documentation": "

The number of tasks on the container instance that are in the PENDING status.

" + "smithy.api#documentation": "

The number of tasks on the container instance that are in the PENDING\n\t\t\tstatus.

" } }, "agentUpdateStatus": { "target": "com.amazonaws.ecs#AgentUpdateStatus", "traits": { - "smithy.api#documentation": "

The status of the most recent agent update. If an update wasn't ever requested, this value is\n\t\t\t\tNULL.

" + "smithy.api#documentation": "

The status of the most recent agent update. If an update wasn't ever requested, this\n\t\t\tvalue is NULL.

" } }, "attributes": { "target": "com.amazonaws.ecs#Attributes", "traits": { - "smithy.api#documentation": "

The attributes set for the container instance, either by the Amazon ECS container agent at instance\n\t\t\tregistration or manually with the PutAttributes operation.

" + "smithy.api#documentation": "

The attributes set for the container instance, either by the Amazon ECS container agent at\n\t\t\tinstance registration or manually with the PutAttributes\n\t\t\toperation.

" } }, "registeredAt": { @@ -2726,13 +2726,13 @@ "attachments": { "target": "com.amazonaws.ecs#Attachments", "traits": { - "smithy.api#documentation": "

The resources attached to a container instance, such as an elastic network interface.

" + "smithy.api#documentation": "

The resources attached to a container instance, such as an elastic network\n\t\t\tinterface.

" } }, "tags": { "target": "com.amazonaws.ecs#Tags", "traits": { - "smithy.api#documentation": "

The metadata that you apply to the container instance to help you categorize and organize them. Each\n\t\t\ttag consists of a key and an optional value. You define both.

\n

The following basic restrictions apply to tags:

\n
    \n
  • \n

    Maximum number of tags per resource - 50

    \n
  • \n
  • \n

    For each resource, each tag key must be unique, and each tag key can have only\n one value.

    \n
  • \n
  • \n

    Maximum key length - 128 Unicode characters in UTF-8

    \n
  • \n
  • \n

    Maximum value length - 256 Unicode characters in UTF-8

    \n
  • \n
  • \n

    If your tagging schema is used across multiple services and resources,\n remember that other services may have restrictions on allowed characters.\n Generally allowed characters are: letters, numbers, and spaces representable in\n UTF-8, and the following characters: + - = . _ : / @.

    \n
  • \n
  • \n

    Tag keys and values are case-sensitive.

    \n
  • \n
  • \n

    Do not use aws:, AWS:, or any upper or lowercase\n combination of such as a prefix for either keys or values as it is reserved for\n Amazon Web Services use. You cannot edit or delete tag keys or values with this prefix. Tags with\n this prefix do not count against your tags per resource limit.

    \n
  • \n
" + "smithy.api#documentation": "

The metadata that you apply to the container instance to help you categorize and\n\t\t\torganize them. Each tag consists of a key and an optional value. You define both.

\n

The following basic restrictions apply to tags:

\n
    \n
  • \n

    Maximum number of tags per resource - 50

    \n
  • \n
  • \n

    For each resource, each tag key must be unique, and each tag key can have only\n one value.

    \n
  • \n
  • \n

    Maximum key length - 128 Unicode characters in UTF-8

    \n
  • \n
  • \n

    Maximum value length - 256 Unicode characters in UTF-8

    \n
  • \n
  • \n

    If your tagging schema is used across multiple services and resources,\n remember that other services may have restrictions on allowed characters.\n Generally allowed characters are: letters, numbers, and spaces representable in\n UTF-8, and the following characters: + - = . _ : / @.

    \n
  • \n
  • \n

    Tag keys and values are case-sensitive.

    \n
  • \n
  • \n

    Do not use aws:, AWS:, or any upper or lowercase\n combination of such as a prefix for either keys or values as it is reserved for\n Amazon Web Services use. You cannot edit or delete tag keys or values with this prefix. Tags with\n this prefix do not count against your tags per resource limit.

    \n
  • \n
" } }, "healthStatus": { @@ -2743,7 +2743,7 @@ } }, "traits": { - "smithy.api#documentation": "

An Amazon EC2 or External instance that's running the Amazon ECS agent and has been registered with a\n\t\t\tcluster.

" + "smithy.api#documentation": "

An Amazon EC2 or External instance that's running the Amazon ECS agent and has been registered\n\t\t\twith a cluster.

" } }, "com.amazonaws.ecs#ContainerInstanceField": { @@ -2775,13 +2775,13 @@ "overallStatus": { "target": "com.amazonaws.ecs#InstanceHealthCheckState", "traits": { - "smithy.api#documentation": "

The overall health status of the container instance. This is an aggregate status of all container\n\t\t\tinstance health checks.

" + "smithy.api#documentation": "

The overall health status of the container instance. This is an aggregate status of\n\t\t\tall container instance health checks.

" } }, "details": { "target": "com.amazonaws.ecs#InstanceHealthCheckResultList", "traits": { - "smithy.api#documentation": "

An array of objects representing the details of the container instance health status.

" + "smithy.api#documentation": "

An array of objects representing the details of the container instance health\n\t\t\tstatus.

" } } }, @@ -2836,54 +2836,54 @@ "name": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

The name of the container that receives the override. This parameter is required if any override is\n\t\t\tspecified.

" + "smithy.api#documentation": "

The name of the container that receives the override. This parameter is required if\n\t\t\tany override is specified.

" } }, "command": { "target": "com.amazonaws.ecs#StringList", "traits": { - "smithy.api#documentation": "

The command to send to the container that overrides the default command from the Docker image or the\n\t\t\ttask definition. You must also specify a container name.

" + "smithy.api#documentation": "

The command to send to the container that overrides the default command from the\n\t\t\tDocker image or the task definition. You must also specify a container name.

" } }, "environment": { "target": "com.amazonaws.ecs#EnvironmentVariables", "traits": { - "smithy.api#documentation": "

The environment variables to send to the container. You can add new environment variables, which are\n\t\t\tadded to the container at launch, or you can override the existing environment variables from the\n\t\t\tDocker image or the task definition. You must also specify a container name.

" + "smithy.api#documentation": "

The environment variables to send to the container. You can add new environment\n\t\t\tvariables, which are added to the container at launch, or you can override the existing\n\t\t\tenvironment variables from the Docker image or the task definition. You must also\n\t\t\tspecify a container name.

" } }, "environmentFiles": { "target": "com.amazonaws.ecs#EnvironmentFiles", "traits": { - "smithy.api#documentation": "

A list of files containing the environment variables to pass to a container, instead of the value\n\t\t\tfrom the container definition.

" + "smithy.api#documentation": "

A list of files containing the environment variables to pass to a container, instead\n\t\t\tof the value from the container definition.

" } }, "cpu": { "target": "com.amazonaws.ecs#BoxedInteger", "traits": { - "smithy.api#documentation": "

The number of cpu units reserved for the container, instead of the default value from\n\t\t\tthe task definition. You must also specify a container name.

" + "smithy.api#documentation": "

The number of cpu units reserved for the container, instead of the\n\t\t\tdefault value from the task definition. You must also specify a container name.

" } }, "memory": { "target": "com.amazonaws.ecs#BoxedInteger", "traits": { - "smithy.api#documentation": "

The hard limit (in MiB) of memory to present to the container, instead of the default value from the\n\t\t\ttask definition. If your container attempts to exceed the memory specified here, the container is\n\t\t\tkilled. You must also specify a container name.

" + "smithy.api#documentation": "

The hard limit (in MiB) of memory to present to the container, instead of the default\n\t\t\tvalue from the task definition. If your container attempts to exceed the memory\n\t\t\tspecified here, the container is killed. You must also specify a container name.

" } }, "memoryReservation": { "target": "com.amazonaws.ecs#BoxedInteger", "traits": { - "smithy.api#documentation": "

The soft limit (in MiB) of memory to reserve for the container, instead of the default value from the\n\t\t\ttask definition. You must also specify a container name.

" + "smithy.api#documentation": "

The soft limit (in MiB) of memory to reserve for the container, instead of the default\n\t\t\tvalue from the task definition. You must also specify a container name.

" } }, "resourceRequirements": { "target": "com.amazonaws.ecs#ResourceRequirements", "traits": { - "smithy.api#documentation": "

The type and amount of a resource to assign to a container, instead of the default value from the\n\t\t\ttask definition. The only supported resource is a GPU.

" + "smithy.api#documentation": "

The type and amount of a resource to assign to a container, instead of the default\n\t\t\tvalue from the task definition. The only supported resource is a GPU.

" } } }, "traits": { - "smithy.api#documentation": "

The overrides that are sent to a container. An empty container override can be passed in. An example\n\t\t\tof an empty container override is {\"containerOverrides\": [ ] }. If a non-empty container\n\t\t\toverride is specified, the name parameter must be included.

\n

You can use Secrets Manager or Amazon Web Services Systems Manager Parameter Store to store the sensitive data. For\n\t\t\tmore information, see Retrieve secrets through environment\n\t\t\t\tvariables in the Amazon ECS Developer Guide.

" + "smithy.api#documentation": "

The overrides that are sent to a container. An empty container override can be passed\n\t\t\tin. An example of an empty container override is {\"containerOverrides\": [ ]\n\t\t\t\t}. If a non-empty container override is specified, the name\n\t\t\tparameter must be included.

\n

You can use Secrets Manager or Amazon Web Services Systems Manager Parameter Store to store the\n\t\t\tsensitive data. For more information, see Retrieve secrets through\n\t\t\t\tenvironment variables in the Amazon ECS Developer Guide.

" } }, "com.amazonaws.ecs#ContainerOverrides": { @@ -2905,18 +2905,18 @@ "ignoredExitCodes": { "target": "com.amazonaws.ecs#IntegerList", "traits": { - "smithy.api#documentation": "

A list of exit codes that Amazon ECS will ignore and not attempt a restart on. You can specify a maximum\n\t\t\tof 50 container exit codes. By default, Amazon ECS does not ignore any exit codes.

" + "smithy.api#documentation": "

A list of exit codes that Amazon ECS will ignore and not attempt a restart on. You can\n\t\t\tspecify a maximum of 50 container exit codes. By default, Amazon ECS does not ignore any exit\n\t\t\tcodes.

" } }, "restartAttemptPeriod": { "target": "com.amazonaws.ecs#BoxedInteger", "traits": { - "smithy.api#documentation": "

A period of time (in seconds) that the container must run for before a restart can be attempted. A\n\t\t\tcontainer can be restarted only once every restartAttemptPeriod seconds. If a container\n\t\t\tisn't able to run for this time period and exits early, it will not be restarted. You can set a minimum\n\t\t\t\trestartAttemptPeriod of 60 seconds and a maximum restartAttemptPeriod of\n\t\t\t1800 seconds. By default, a container must run for 300 seconds before it can be restarted.

" + "smithy.api#documentation": "

A period of time (in seconds) that the container must run for before a restart can be\n\t\t\tattempted. A container can be restarted only once every\n\t\t\t\trestartAttemptPeriod seconds. If a container isn't able to run for this\n\t\t\ttime period and exits early, it will not be restarted. You can set a minimum\n\t\t\t\trestartAttemptPeriod of 60 seconds and a maximum\n\t\t\t\trestartAttemptPeriod of 1800 seconds. By default, a container must run\n\t\t\tfor 300 seconds before it can be restarted.

" } } }, "traits": { - "smithy.api#documentation": "

You can enable a restart policy for each container defined in your task definition, to overcome\n\t\t\ttransient failures faster and maintain task availability. When you enable a restart policy for a\n\t\t\tcontainer, Amazon ECS can restart the container if it exits, without needing to replace the task. For more\n\t\t\tinformation, see Restart individual containers\n\t\t\t\tin Amazon ECS tasks with container restart policies in the Amazon Elastic Container Service Developer Guide.

" + "smithy.api#documentation": "

You can enable a restart policy for each container defined in your task definition, to\n\t\t\tovercome transient failures faster and maintain task availability. When you enable a\n\t\t\trestart policy for a container, Amazon ECS can restart the container if it exits, without\n\t\t\tneeding to replace the task. For more information, see Restart\n\t\t\t\tindividual containers in Amazon ECS tasks with container restart policies in the\n\t\t\tAmazon Elastic Container Service Developer Guide.

" } }, "com.amazonaws.ecs#ContainerStateChange": { @@ -2943,7 +2943,7 @@ "exitCode": { "target": "com.amazonaws.ecs#BoxedInteger", "traits": { - "smithy.api#documentation": "

The exit code for the container, if the state change is a result of the container exiting.

" + "smithy.api#documentation": "

The exit code for the container, if the state change is a result of the container\n\t\t\texiting.

" } }, "networkBindings": { @@ -3007,7 +3007,7 @@ } ], "traits": { - "smithy.api#documentation": "

Creates a new capacity provider. Capacity providers are associated with an Amazon ECS cluster and are used\n\t\t\tin capacity provider strategies to facilitate cluster auto scaling.

\n

Only capacity providers that use an Auto Scaling group can be created. Amazon ECS tasks on Fargate use\n\t\t\tthe FARGATE and FARGATE_SPOT capacity providers. These providers are\n\t\t\tavailable to all accounts in the Amazon Web Services Regions that Fargate supports.

" + "smithy.api#documentation": "

Creates a new capacity provider. Capacity providers are associated with an Amazon ECS\n\t\t\tcluster and are used in capacity provider strategies to facilitate cluster auto\n\t\t\tscaling.

\n

Only capacity providers that use an Auto Scaling group can be created. Amazon ECS tasks on\n\t\t\tFargate use the FARGATE and FARGATE_SPOT capacity providers.\n\t\t\tThese providers are available to all accounts in the Amazon Web Services Regions that Fargate\n\t\t\tsupports.

" } }, "com.amazonaws.ecs#CreateCapacityProviderRequest": { @@ -3016,7 +3016,7 @@ "name": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

The name of the capacity provider. Up to 255 characters are allowed. They include letters (both upper\n\t\t\tand lowercase letters), numbers, underscores (_), and hyphens (-). The name can't be prefixed with\n\t\t\t\t\"aws\", \"ecs\", or \"fargate\".

", + "smithy.api#documentation": "

The name of the capacity provider. Up to 255 characters are allowed. They include\n\t\t\tletters (both upper and lowercase letters), numbers, underscores (_), and hyphens (-).\n\t\t\tThe name can't be prefixed with \"aws\", \"ecs\", or\n\t\t\t\t\"fargate\".

", "smithy.api#required": {} } }, @@ -3030,7 +3030,7 @@ "tags": { "target": "com.amazonaws.ecs#Tags", "traits": { - "smithy.api#documentation": "

The metadata that you apply to the capacity provider to categorize and organize them more\n\t\t\tconveniently. Each tag consists of a key and an optional value. You define both of them.

\n

The following basic restrictions apply to tags:

\n
    \n
  • \n

    Maximum number of tags per resource - 50

    \n
  • \n
  • \n

    For each resource, each tag key must be unique, and each tag key can have only\n one value.

    \n
  • \n
  • \n

    Maximum key length - 128 Unicode characters in UTF-8

    \n
  • \n
  • \n

    Maximum value length - 256 Unicode characters in UTF-8

    \n
  • \n
  • \n

    If your tagging schema is used across multiple services and resources,\n remember that other services may have restrictions on allowed characters.\n Generally allowed characters are: letters, numbers, and spaces representable in\n UTF-8, and the following characters: + - = . _ : / @.

    \n
  • \n
  • \n

    Tag keys and values are case-sensitive.

    \n
  • \n
  • \n

    Do not use aws:, AWS:, or any upper or lowercase\n combination of such as a prefix for either keys or values as it is reserved for\n Amazon Web Services use. You cannot edit or delete tag keys or values with this prefix. Tags with\n this prefix do not count against your tags per resource limit.

    \n
  • \n
" + "smithy.api#documentation": "

The metadata that you apply to the capacity provider to categorize and organize them\n\t\t\tmore conveniently. Each tag consists of a key and an optional value. You define both of\n\t\t\tthem.

\n

The following basic restrictions apply to tags:

\n
    \n
  • \n

    Maximum number of tags per resource - 50

    \n
  • \n
  • \n

    For each resource, each tag key must be unique, and each tag key can have only\n one value.

    \n
  • \n
  • \n

    Maximum key length - 128 Unicode characters in UTF-8

    \n
  • \n
  • \n

    Maximum value length - 256 Unicode characters in UTF-8

    \n
  • \n
  • \n

    If your tagging schema is used across multiple services and resources,\n remember that other services may have restrictions on allowed characters.\n Generally allowed characters are: letters, numbers, and spaces representable in\n UTF-8, and the following characters: + - = . _ : / @.

    \n
  • \n
  • \n

    Tag keys and values are case-sensitive.

    \n
  • \n
  • \n

    Do not use aws:, AWS:, or any upper or lowercase\n combination of such as a prefix for either keys or values as it is reserved for\n Amazon Web Services use. You cannot edit or delete tag keys or values with this prefix. Tags with\n this prefix do not count against your tags per resource limit.

    \n
  • \n
" } } }, @@ -3075,7 +3075,7 @@ } ], "traits": { - "smithy.api#documentation": "

Creates a new Amazon ECS cluster. By default, your account receives a default cluster when\n\t\t\tyou launch your first container instance. However, you can create your own cluster with a unique\n\t\t\tname.

\n \n

When you call the CreateCluster API operation,\n\t\t\t\tAmazon ECS attempts to create the Amazon ECS service-linked role for your account. This is so that it can\n\t\t\t\tmanage required resources in other Amazon Web Services services on your behalf. However, if the user that makes\n\t\t\t\tthe call doesn't have permissions to create the service-linked role, it isn't created. For more\n\t\t\t\tinformation, see Using service-linked\n\t\t\t\t\troles for Amazon ECS in the Amazon Elastic Container Service Developer Guide.

\n
", + "smithy.api#documentation": "

Creates a new Amazon ECS cluster. By default, your account receives a default\n\t\t\tcluster when you launch your first container instance. However, you can create your own\n\t\t\tcluster with a unique name.

\n \n

When you call the CreateCluster\n\t\t\t\tAPI operation, Amazon ECS attempts to create the Amazon ECS service-linked role for your\n\t\t\t\taccount. This is so that it can manage required resources in other Amazon Web Services services on\n\t\t\t\tyour behalf. However, if the user that makes the call doesn't have permissions to\n\t\t\t\tcreate the service-linked role, it isn't created. For more information, see Using\n\t\t\t\t\tservice-linked roles for Amazon ECS in the Amazon Elastic Container Service Developer Guide.

\n
", "smithy.api#examples": [ { "title": "To create a new cluster", @@ -3104,19 +3104,19 @@ "clusterName": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

The name of your cluster. If you don't specify a name for your cluster, you create a cluster that's\n\t\t\tnamed default. Up to 255 letters (uppercase and lowercase), numbers, underscores, and hyphens are allowed.

" + "smithy.api#documentation": "

The name of your cluster. If you don't specify a name for your cluster, you create a\n\t\t\tcluster that's named default. Up to 255 letters (uppercase and lowercase), numbers, underscores, and hyphens are allowed.

" } }, "tags": { "target": "com.amazonaws.ecs#Tags", "traits": { - "smithy.api#documentation": "

The metadata that you apply to the cluster to help you categorize and organize them. Each tag\n\t\t\tconsists of a key and an optional value. You define both.

\n

The following basic restrictions apply to tags:

\n
    \n
  • \n

    Maximum number of tags per resource - 50

    \n
  • \n
  • \n

    For each resource, each tag key must be unique, and each tag key can have only\n one value.

    \n
  • \n
  • \n

    Maximum key length - 128 Unicode characters in UTF-8

    \n
  • \n
  • \n

    Maximum value length - 256 Unicode characters in UTF-8

    \n
  • \n
  • \n

    If your tagging schema is used across multiple services and resources,\n remember that other services may have restrictions on allowed characters.\n Generally allowed characters are: letters, numbers, and spaces representable in\n UTF-8, and the following characters: + - = . _ : / @.

    \n
  • \n
  • \n

    Tag keys and values are case-sensitive.

    \n
  • \n
  • \n

    Do not use aws:, AWS:, or any upper or lowercase\n combination of such as a prefix for either keys or values as it is reserved for\n Amazon Web Services use. You cannot edit or delete tag keys or values with this prefix. Tags with\n this prefix do not count against your tags per resource limit.

    \n
  • \n
" + "smithy.api#documentation": "

The metadata that you apply to the cluster to help you categorize and organize them.\n\t\t\tEach tag consists of a key and an optional value. You define both.

\n

The following basic restrictions apply to tags:

\n
    \n
  • \n

    Maximum number of tags per resource - 50

    \n
  • \n
  • \n

    For each resource, each tag key must be unique, and each tag key can have only\n one value.

    \n
  • \n
  • \n

    Maximum key length - 128 Unicode characters in UTF-8

    \n
  • \n
  • \n

    Maximum value length - 256 Unicode characters in UTF-8

    \n
  • \n
  • \n

    If your tagging schema is used across multiple services and resources,\n remember that other services may have restrictions on allowed characters.\n Generally allowed characters are: letters, numbers, and spaces representable in\n UTF-8, and the following characters: + - = . _ : / @.

    \n
  • \n
  • \n

    Tag keys and values are case-sensitive.

    \n
  • \n
  • \n

    Do not use aws:, AWS:, or any upper or lowercase\n combination of such as a prefix for either keys or values as it is reserved for\n Amazon Web Services use. You cannot edit or delete tag keys or values with this prefix. Tags with\n this prefix do not count against your tags per resource limit.

    \n
  • \n
" } }, "settings": { "target": "com.amazonaws.ecs#ClusterSettings", "traits": { - "smithy.api#documentation": "

The setting to use when creating a cluster. This parameter is used to turn on CloudWatch Container Insights\n\t\t\tfor a cluster. If this value is specified, it overrides the containerInsights value set\n\t\t\twith PutAccountSetting or PutAccountSettingDefault.

" + "smithy.api#documentation": "

The setting to use when creating a cluster. This parameter is used to turn on CloudWatch\n\t\t\tContainer Insights for a cluster. If this value is specified, it overrides the\n\t\t\t\tcontainerInsights value set with PutAccountSetting or PutAccountSettingDefault.

" } }, "configuration": { @@ -3128,13 +3128,13 @@ "capacityProviders": { "target": "com.amazonaws.ecs#StringList", "traits": { - "smithy.api#documentation": "

The short name of one or more capacity providers to associate with the cluster. A capacity provider\n\t\t\tmust be associated with a cluster before it can be included as part of the default capacity provider\n\t\t\tstrategy of the cluster or used in a capacity provider strategy when calling the CreateService or RunTask actions.

\n

If specifying a capacity provider that uses an Auto Scaling group, the capacity provider must be\n\t\t\tcreated but not associated with another cluster. New Auto Scaling group capacity providers can be\n\t\t\tcreated with the CreateCapacityProvider\n\t\t\tAPI operation.

\n

To use a Fargate capacity provider, specify either the FARGATE or\n\t\t\t\tFARGATE_SPOT capacity providers. The Fargate capacity providers are available to all\n\t\t\taccounts and only need to be associated with a cluster to be used.

\n

The PutCapacityProvider API operation is used to update the list of available capacity\n\t\t\tproviders for a cluster after the cluster is created.

" + "smithy.api#documentation": "

The short name of one or more capacity providers to associate with the cluster. A\n\t\t\tcapacity provider must be associated with a cluster before it can be included as part of\n\t\t\tthe default capacity provider strategy of the cluster or used in a capacity provider\n\t\t\tstrategy when calling the CreateService or\n\t\t\t\tRunTask actions.

\n

If specifying a capacity provider that uses an Auto Scaling group, the capacity\n\t\t\tprovider must be created but not associated with another cluster. New Auto Scaling group\n\t\t\tcapacity providers can be created with the CreateCapacityProvider API operation.

\n

To use a Fargate capacity provider, specify either the FARGATE or\n\t\t\t\tFARGATE_SPOT capacity providers. The Fargate capacity providers are\n\t\t\tavailable to all accounts and only need to be associated with a cluster to be\n\t\t\tused.

\n

The PutCapacityProvider API operation is used to update the list of available\n\t\t\tcapacity providers for a cluster after the cluster is created.

" } }, "defaultCapacityProviderStrategy": { "target": "com.amazonaws.ecs#CapacityProviderStrategy", "traits": { - "smithy.api#documentation": "

The capacity provider strategy to set as the default for the cluster. After a default capacity\n\t\t\tprovider strategy is set for a cluster, when you call the CreateService or RunTask APIs with\n\t\t\tno capacity provider strategy or launch type specified, the default capacity provider strategy for the\n\t\t\tcluster is used.

\n

If a default capacity provider strategy isn't defined for a cluster when it was created, it can be\n\t\t\tdefined later with the PutClusterCapacityProviders API operation.

" + "smithy.api#documentation": "

The capacity provider strategy to set as the default for the cluster. After a default\n\t\t\tcapacity provider strategy is set for a cluster, when you call the CreateService or RunTask APIs with no\n\t\t\tcapacity provider strategy or launch type specified, the default capacity provider\n\t\t\tstrategy for the cluster is used.

\n

If a default capacity provider strategy isn't defined for a cluster when it was\n\t\t\tcreated, it can be defined later with the PutClusterCapacityProviders API operation.

" } }, "serviceConnectDefaults": { @@ -3200,7 +3200,7 @@ } ], "traits": { - "smithy.api#documentation": "

Runs and maintains your desired number of tasks from a specified task definition. If the number of\n\t\t\ttasks running in a service drops below the desiredCount, Amazon ECS runs another copy of the\n\t\t\ttask in the specified cluster. To update an existing service, use UpdateService.

\n \n

On March 21, 2024, a change was made to resolve the task definition revision before authorization. When a task definition revision is not specified, authorization will occur using the latest revision of a task definition.

\n
\n \n

Amazon Elastic Inference (EI) is no longer available to customers.

\n
\n

In addition to maintaining the desired count of tasks in your service, you can optionally run your\n\t\t\tservice behind one or more load balancers. The load balancers distribute traffic across the tasks that\n\t\t\tare associated with the service. For more information, see Service load\n\t\t\t\tbalancing in the Amazon Elastic Container Service Developer Guide.

\n

You can attach Amazon EBS volumes to Amazon ECS tasks by configuring the volume when creating or updating a\n\t\t\tservice. volumeConfigurations is only supported for REPLICA service and not DAEMON\n\t\t\tservice. For more infomation, see Amazon EBS\n\t\t\t\tvolumes in the Amazon Elastic Container Service Developer Guide.

\n

Tasks for services that don't use a load balancer are considered healthy if they're in the\n\t\t\t\tRUNNING state. Tasks for services that use a load balancer are considered healthy if\n\t\t\tthey're in the RUNNING state and are reported as healthy by the load balancer.

\n

There are two service scheduler strategies available:

\n
    \n
  • \n

    \n REPLICA - The replica scheduling strategy places and maintains your\n\t\t\t\t\tdesired number of tasks across your cluster. By default, the service scheduler spreads tasks\n\t\t\t\t\tacross Availability Zones. You can use task placement strategies and constraints to customize\n\t\t\t\t\ttask placement decisions. For more information, see Service\n\t\t\t\t\t\tscheduler concepts in the Amazon Elastic Container Service Developer Guide.

    \n
  • \n
  • \n

    \n DAEMON - The daemon scheduling strategy deploys exactly one task on each\n\t\t\t\t\tactive container instance that meets all of the task placement constraints that you specify in\n\t\t\t\t\tyour cluster. The service scheduler also evaluates the task placement constraints for running\n\t\t\t\t\ttasks. It also stops tasks that don't meet the placement constraints. When using this strategy,\n\t\t\t\t\tyou don't need to specify a desired number of tasks, a task placement strategy, or use Service\n\t\t\t\t\tAuto Scaling policies. For more information, see Service\n\t\t\t\t\t\tscheduler concepts in the Amazon Elastic Container Service Developer Guide.

    \n
  • \n
\n

You can optionally specify a deployment configuration for your service. The deployment is initiated\n\t\t\tby changing properties. For example, the deployment might be initiated by the task definition or by\n\t\t\tyour desired count of a service. You can use UpdateService. The default value for a replica service for\n\t\t\t\tminimumHealthyPercent is 100%. The default value for a daemon service for\n\t\t\t\tminimumHealthyPercent is 0%.

\n

If a service uses the ECS deployment controller, the minimum healthy percent represents\n\t\t\ta lower limit on the number of tasks in a service that must remain in the RUNNING state\n\t\t\tduring a deployment. Specifically, it represents it as a percentage of your desired number of tasks\n\t\t\t(rounded up to the nearest integer). This happens when any of your container instances are in the\n\t\t\t\tDRAINING state if the service contains tasks using the EC2 launch type.\n\t\t\tUsing this parameter, you can deploy without using additional cluster capacity. For example, if you set\n\t\t\tyour service to have desired number of four tasks and a minimum healthy percent of 50%, the scheduler\n\t\t\tmight stop two existing tasks to free up cluster capacity before starting two new tasks. If they're in\n\t\t\tthe RUNNING state, tasks for services that don't use a load balancer are considered\n\t\t\thealthy . If they're in the RUNNING state and reported as healthy by the load balancer,\n\t\t\ttasks for services that do use a load balancer are considered healthy . The\n\t\t\tdefault value for minimum healthy percent is 100%.

\n

If a service uses the ECS deployment controller, the maximum\n\t\t\t\tpercent parameter represents an upper limit on the number of tasks in a service that are\n\t\t\tallowed in the RUNNING or PENDING state during a deployment. Specifically, it\n\t\t\trepresents it as a percentage of the desired number of tasks (rounded down to the nearest integer).\n\t\t\tThis happens when any of your container instances are in the DRAINING state if the service\n\t\t\tcontains tasks using the EC2 launch type. Using this parameter, you can define the\n\t\t\tdeployment batch size. For example, if your service has a desired number of four tasks and a maximum\n\t\t\tpercent value of 200%, the scheduler may start four new tasks before stopping the four older tasks\n\t\t\t(provided that the cluster resources required to do this are available). The default value for maximum\n\t\t\tpercent is 200%.

\n

If a service uses either the CODE_DEPLOY or EXTERNAL deployment controller\n\t\t\ttypes and tasks that use the EC2 launch type, the minimum healthy\n\t\t\t\tpercent and maximum percent values are used only to\n\t\t\tdefine the lower and upper limit on the number of the tasks in the service that remain in the\n\t\t\t\tRUNNING state. This is while the container instances are in the DRAINING\n\t\t\tstate. If the tasks in the service use the Fargate launch type, the minimum healthy\n\t\t\tpercent and maximum percent values aren't used. This is the case even if they're currently visible when\n\t\t\tdescribing your service.

\n

When creating a service that uses the EXTERNAL deployment controller, you can specify\n\t\t\tonly parameters that aren't controlled at the task set level. The only required parameter is the\n\t\t\tservice name. You control your services using the CreateTaskSet. For more information, see Amazon ECS deployment\n\t\t\t\ttypes in the Amazon Elastic Container Service Developer Guide.

\n

When the service scheduler launches new tasks, it determines task placement. For information about\n\t\t\ttask placement and task placement strategies, see Amazon ECS task\n\t\t\t\tplacement in the Amazon Elastic Container Service Developer Guide\n

", + "smithy.api#documentation": "

Runs and maintains your desired number of tasks from a specified task definition. If\n\t\t\tthe number of tasks running in a service drops below the desiredCount,\n\t\t\tAmazon ECS runs another copy of the task in the specified cluster. To update an existing\n\t\t\tservice, use UpdateService.

\n \n

On March 21, 2024, a change was made to resolve the task definition revision before authorization. When a task definition revision is not specified, authorization will occur using the latest revision of a task definition.

\n
\n \n

Amazon Elastic Inference (EI) is no longer available to customers.

\n
\n

In addition to maintaining the desired count of tasks in your service, you can\n\t\t\toptionally run your service behind one or more load balancers. The load balancers\n\t\t\tdistribute traffic across the tasks that are associated with the service. For more\n\t\t\tinformation, see Service load balancing in the Amazon Elastic Container Service Developer Guide.

\n

You can attach Amazon EBS volumes to Amazon ECS tasks by configuring the volume when creating or\n\t\t\tupdating a service. volumeConfigurations is only supported for REPLICA\n\t\t\tservice and not DAEMON service. For more infomation, see Amazon EBS volumes in the Amazon Elastic Container Service Developer Guide.

\n

Tasks for services that don't use a load balancer are considered healthy if they're in\n\t\t\tthe RUNNING state. Tasks for services that use a load balancer are\n\t\t\tconsidered healthy if they're in the RUNNING state and are reported as\n\t\t\thealthy by the load balancer.

\n

There are two service scheduler strategies available:

\n
    \n
  • \n

    \n REPLICA - The replica scheduling strategy places and\n\t\t\t\t\tmaintains your desired number of tasks across your cluster. By default, the\n\t\t\t\t\tservice scheduler spreads tasks across Availability Zones. You can use task\n\t\t\t\t\tplacement strategies and constraints to customize task placement decisions. For\n\t\t\t\t\tmore information, see Service scheduler concepts in the Amazon Elastic Container Service Developer Guide.

    \n
  • \n
  • \n

    \n DAEMON - The daemon scheduling strategy deploys exactly one\n\t\t\t\t\ttask on each active container instance that meets all of the task placement\n\t\t\t\t\tconstraints that you specify in your cluster. The service scheduler also\n\t\t\t\t\tevaluates the task placement constraints for running tasks. It also stops tasks\n\t\t\t\t\tthat don't meet the placement constraints. When using this strategy, you don't\n\t\t\t\t\tneed to specify a desired number of tasks, a task placement strategy, or use\n\t\t\t\t\tService Auto Scaling policies. For more information, see Service scheduler concepts in the Amazon Elastic Container Service Developer Guide.

    \n
  • \n
\n

You can optionally specify a deployment configuration for your service. The deployment\n\t\t\tis initiated by changing properties. For example, the deployment might be initiated by\n\t\t\tthe task definition or by your desired count of a service. You can use UpdateService. The default value for a replica service for\n\t\t\t\tminimumHealthyPercent is 100%. The default value for a daemon service\n\t\t\tfor minimumHealthyPercent is 0%.

\n

If a service uses the ECS deployment controller, the minimum healthy\n\t\t\tpercent represents a lower limit on the number of tasks in a service that must remain in\n\t\t\tthe RUNNING state during a deployment. Specifically, it represents it as a\n\t\t\tpercentage of your desired number of tasks (rounded up to the nearest integer). This\n\t\t\thappens when any of your container instances are in the DRAINING state if\n\t\t\tthe service contains tasks using the EC2 launch type. Using this\n\t\t\tparameter, you can deploy without using additional cluster capacity. For example, if you\n\t\t\tset your service to have desired number of four tasks and a minimum healthy percent of\n\t\t\t50%, the scheduler might stop two existing tasks to free up cluster capacity before\n\t\t\tstarting two new tasks. If they're in the RUNNING state, tasks for services\n\t\t\tthat don't use a load balancer are considered healthy . If they're in the\n\t\t\t\tRUNNING state and reported as healthy by the load balancer, tasks for\n\t\t\tservices that do use a load balancer are considered healthy . The\n\t\t\tdefault value for minimum healthy percent is 100%.

\n

If a service uses the ECS deployment controller, the maximum percent parameter represents an upper limit on the\n\t\t\tnumber of tasks in a service that are allowed in the RUNNING or\n\t\t\t\tPENDING state during a deployment. Specifically, it represents it as a\n\t\t\tpercentage of the desired number of tasks (rounded down to the nearest integer). This\n\t\t\thappens when any of your container instances are in the DRAINING state if\n\t\t\tthe service contains tasks using the EC2 launch type. Using this\n\t\t\tparameter, you can define the deployment batch size. For example, if your service has a\n\t\t\tdesired number of four tasks and a maximum percent value of 200%, the scheduler may\n\t\t\tstart four new tasks before stopping the four older tasks (provided that the cluster\n\t\t\tresources required to do this are available). The default value for maximum percent is\n\t\t\t200%.

\n

If a service uses either the CODE_DEPLOY or EXTERNAL\n\t\t\tdeployment controller types and tasks that use the EC2 launch type, the\n\t\t\t\tminimum healthy percent and maximum percent values are used only to define the lower and upper limit\n\t\t\ton the number of the tasks in the service that remain in the RUNNING state.\n\t\t\tThis is while the container instances are in the DRAINING state. If the\n\t\t\ttasks in the service use the Fargate launch type, the minimum healthy\n\t\t\tpercent and maximum percent values aren't used. This is the case even if they're\n\t\t\tcurrently visible when describing your service.

\n

When creating a service that uses the EXTERNAL deployment controller, you\n\t\t\tcan specify only parameters that aren't controlled at the task set level. The only\n\t\t\trequired parameter is the service name. You control your services using the CreateTaskSet. For more information, see Amazon ECS deployment types in the Amazon Elastic Container Service Developer Guide.

\n

When the service scheduler launches new tasks, it determines task placement. For\n\t\t\tinformation about task placement and task placement strategies, see Amazon ECS\n\t\t\t\ttask placement in the Amazon Elastic Container Service Developer Guide.\n

", "smithy.api#examples": [ { "title": "To create a new service", @@ -3322,14 +3322,14 @@ "serviceName": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

The name of your service. Up to 255 letters (uppercase and lowercase), numbers, underscores, and hyphens are allowed. Service names must be unique within a cluster, but\n\t\t\tyou can have similarly named services in multiple clusters within a Region or across multiple\n\t\t\tRegions.

", + "smithy.api#documentation": "

The name of your service. Up to 255 letters (uppercase and lowercase), numbers, underscores, and hyphens are allowed. Service names must be unique within\n\t\t\ta cluster, but you can have similarly named services in multiple clusters within a\n\t\t\tRegion or across multiple Regions.

", "smithy.api#required": {} } }, "taskDefinition": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

The family and revision (family:revision) or full ARN of the\n\t\t\ttask definition to run in your service. If a revision isn't specified, the latest\n\t\t\t\tACTIVE revision is used.

\n

A task definition must be specified if the service uses either the ECS or\n\t\t\t\tCODE_DEPLOY deployment controllers.

\n

For more information about deployment types, see Amazon ECS deployment types.

" + "smithy.api#documentation": "

The family and revision (family:revision) or\n\t\t\tfull ARN of the task definition to run in your service. If a revision\n\t\t\tisn't specified, the latest ACTIVE revision is used.

\n

A task definition must be specified if the service uses either the ECS or\n\t\t\t\tCODE_DEPLOY deployment controllers.

\n

For more information about deployment types, see Amazon ECS deployment\n\t\t\t\ttypes.

" } }, "availabilityZoneRebalancing": { @@ -3341,19 +3341,19 @@ "loadBalancers": { "target": "com.amazonaws.ecs#LoadBalancers", "traits": { - "smithy.api#documentation": "

A load balancer object representing the load balancers to use with your service. For more\n\t\t\tinformation, see Service load balancing in the Amazon Elastic Container Service Developer Guide.

\n

If the service uses the rolling update (ECS) deployment controller and using either an\n\t\t\tApplication Load Balancer or Network Load Balancer, you must specify one or more target group ARNs to attach to the service. The\n\t\t\tservice-linked role is required for services that use multiple target groups. For more information, see\n\t\t\t\tUsing service-linked roles for Amazon ECS in the Amazon Elastic Container Service Developer Guide.

\n

If the service uses the CODE_DEPLOY deployment controller, the service is required to\n\t\t\tuse either an Application Load Balancer or Network Load Balancer. When creating an CodeDeploy deployment group, you specify two target groups\n\t\t\t(referred to as a targetGroupPair). During a deployment, CodeDeploy determines which task set\n\t\t\tin your service has the status PRIMARY, and it associates one target group with it. Then,\n\t\t\tit also associates the other target group with the replacement task set. The load balancer can also\n\t\t\thave up to two listeners: a required listener for production traffic and an optional listener that you\n\t\t\tcan use to perform validation tests with Lambda functions before routing production traffic to\n\t\t\tit.

\n

If you use the CODE_DEPLOY deployment controller, these values can be changed when\n\t\t\tupdating the service.

\n

For Application Load Balancers and Network Load Balancers, this object must contain the load balancer target group ARN, the container\n\t\t\tname, and the container port to access from the load balancer. The container name must be as it appears\n\t\t\tin a container definition. The load balancer name parameter must be omitted. When a task from this\n\t\t\tservice is placed on a container instance, the container instance and port combination is registered as\n\t\t\ta target in the target group that's specified here.

\n

For Classic Load Balancers, this object must contain the load balancer name, the container name , and the container\n\t\t\tport to access from the load balancer. The container name must be as it appears in a container\n\t\t\tdefinition. The target group ARN parameter must be omitted. When a task from this service is placed\n\t\t\ton a container instance, the container instance is registered with the load balancer that's specified\n\t\t\there.

\n

Services with tasks that use the awsvpc network mode (for example, those with the\n\t\t\tFargate launch type) only support Application Load Balancers and Network Load Balancers. Classic Load Balancers aren't supported. Also, when\n\t\t\tyou create any target groups for these services, you must choose ip as the target type,\n\t\t\tnot instance. This is because tasks that use the awsvpc network mode are\n\t\t\tassociated with an elastic network interface, not an Amazon EC2 instance.

" + "smithy.api#documentation": "

A load balancer object representing the load balancers to use with your service. For\n\t\t\tmore information, see Service load balancing in the Amazon Elastic Container Service Developer Guide.

\n

If the service uses the rolling update (ECS) deployment controller and\n\t\t\tusing either an Application Load Balancer or Network Load Balancer, you must specify one or more target group ARNs to attach\n\t\t\tto the service. The service-linked role is required for services that use multiple\n\t\t\ttarget groups. For more information, see Using service-linked roles for Amazon ECS in the\n\t\t\tAmazon Elastic Container Service Developer Guide.

\n

If the service uses the CODE_DEPLOY deployment controller, the service is\n\t\t\trequired to use either an Application Load Balancer or Network Load Balancer. When creating an CodeDeploy deployment group, you\n\t\t\tspecify two target groups (referred to as a targetGroupPair). During a\n\t\t\tdeployment, CodeDeploy determines which task set in your service has the status\n\t\t\t\tPRIMARY, and it associates one target group with it. Then, it also\n\t\t\tassociates the other target group with the replacement task set. The load balancer can\n\t\t\talso have up to two listeners: a required listener for production traffic and an\n\t\t\toptional listener that you can use to perform validation tests with Lambda functions\n\t\t\tbefore routing production traffic to it.

\n

If you use the CODE_DEPLOY deployment controller, these values can be\n\t\t\tchanged when updating the service.

\n

For Application Load Balancers and Network Load Balancers, this object must contain the load balancer target group ARN,\n\t\t\tthe container name, and the container port to access from the load balancer. The\n\t\t\tcontainer name must be as it appears in a container definition. The load balancer name\n\t\t\tparameter must be omitted. When a task from this service is placed on a container\n\t\t\tinstance, the container instance and port combination is registered as a target in the\n\t\t\ttarget group that's specified here.

\n

For Classic Load Balancers, this object must contain the load balancer name, the container name, and\n\t\t\tthe container port to access from the load balancer. The container name must be as it\n\t\t\tappears in a container definition. The target group ARN parameter must be omitted.\n\t\t\tWhen a task from this service is placed on a container instance, the container instance\n\t\t\tis registered with the load balancer that's specified here.

\n

Services with tasks that use the awsvpc network mode (for example, those\n\t\t\twith the Fargate launch type) only support Application Load Balancers and Network Load Balancers. Classic Load Balancers\n\t\t\taren't supported. Also, when you create any target groups for these services, you must\n\t\t\tchoose ip as the target type, not instance. This is because\n\t\t\ttasks that use the awsvpc network mode are associated with an elastic\n\t\t\tnetwork interface, not an Amazon EC2 instance.

" } }, "serviceRegistries": { "target": "com.amazonaws.ecs#ServiceRegistries", "traits": { - "smithy.api#documentation": "

The details of the service discovery registry to associate with this service. For more information,\n\t\t\tsee Service\n\t\t\t\tdiscovery.

\n \n

Each service may be associated with one service registry. Multiple service registries for each\n\t\t\t\tservice isn't supported.

\n
" + "smithy.api#documentation": "

The details of the service discovery registry to associate with this service. For more\n\t\t\tinformation, see Service\n\t\t\t\tdiscovery.

\n \n

Each service may be associated with one service registry. Multiple service\n\t\t\t\tregistries for each service isn't supported.

\n
" } }, "desiredCount": { "target": "com.amazonaws.ecs#BoxedInteger", "traits": { - "smithy.api#documentation": "

The number of instantiations of the specified task definition to place and keep running in your\n\t\t\tservice.

\n

This is required if schedulingStrategy is REPLICA or isn't specified. If\n\t\t\t\tschedulingStrategy is DAEMON then this isn't required.

" + "smithy.api#documentation": "

The number of instantiations of the specified task definition to place and keep\n\t\t\trunning in your service.

\n

This is required if schedulingStrategy is REPLICA or isn't\n\t\t\tspecified. If schedulingStrategy is DAEMON then this isn't\n\t\t\trequired.

" } }, "clientToken": { @@ -3365,93 +3365,93 @@ "launchType": { "target": "com.amazonaws.ecs#LaunchType", "traits": { - "smithy.api#documentation": "

The infrastructure that you run your service on. For more information, see Amazon ECS launch\n\t\t\t\ttypes in the Amazon Elastic Container Service Developer Guide.

\n

The FARGATE launch type runs your tasks on Fargate On-Demand infrastructure.

\n \n

Fargate Spot infrastructure is available for use but a capacity provider strategy must be used.\n\t\t\t\tFor more information, see Fargate capacity providers in the Amazon ECS Developer Guide.

\n
\n

The EC2 launch type runs your tasks on Amazon EC2 instances registered to your\n\t\t\tcluster.

\n

The EXTERNAL launch type runs your tasks on your on-premises server or virtual machine\n\t\t\t(VM) capacity registered to your cluster.

\n

A service can use either a launch type or a capacity provider strategy. If a launchType\n\t\t\tis specified, the capacityProviderStrategy parameter must be omitted.

" + "smithy.api#documentation": "

The infrastructure that you run your service on. For more information, see Amazon ECS\n\t\t\t\tlaunch types in the Amazon Elastic Container Service Developer Guide.

\n

The FARGATE launch type runs your tasks on Fargate On-Demand\n\t\t\tinfrastructure.

\n \n

Fargate Spot infrastructure is available for use but a capacity provider\n\t\t\t\tstrategy must be used. For more information, see Fargate capacity providers in the Amazon ECS\n\t\t\t\t\tDeveloper Guide.

\n
\n

The EC2 launch type runs your tasks on Amazon EC2 instances registered to your\n\t\t\tcluster.

\n

The EXTERNAL launch type runs your tasks on your on-premises server or\n\t\t\tvirtual machine (VM) capacity registered to your cluster.

\n

A service can use either a launch type or a capacity provider strategy. If a\n\t\t\t\tlaunchType is specified, the capacityProviderStrategy\n\t\t\tparameter must be omitted.

" } }, "capacityProviderStrategy": { "target": "com.amazonaws.ecs#CapacityProviderStrategy", "traits": { - "smithy.api#documentation": "

The capacity provider strategy to use for the service.

\n

If a capacityProviderStrategy is specified, the launchType parameter must\n\t\t\tbe omitted. If no capacityProviderStrategy or launchType is specified, the\n\t\t\t\tdefaultCapacityProviderStrategy for the cluster is used.

\n

A capacity provider strategy may contain a maximum of 6 capacity providers.

" + "smithy.api#documentation": "

The capacity provider strategy to use for the service.

\n

If a capacityProviderStrategy is specified, the launchType\n\t\t\tparameter must be omitted. If no capacityProviderStrategy or\n\t\t\t\tlaunchType is specified, the\n\t\t\t\tdefaultCapacityProviderStrategy for the cluster is used.

\n

A capacity provider strategy can contain a maximum of 20 capacity providers.

" } }, "platformVersion": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

The platform version that your tasks in the service are running on. A platform version is specified\n\t\t\tonly for tasks using the Fargate launch type. If one isn't specified, the\n\t\t\t\tLATEST platform version is used. For more information, see Fargate platform versions in\n\t\t\tthe Amazon Elastic Container Service Developer Guide.

" + "smithy.api#documentation": "

The platform version that your tasks in the service are running on. A platform version\n\t\t\tis specified only for tasks using the Fargate launch type. If one isn't\n\t\t\tspecified, the LATEST platform version is used. For more information, see\n\t\t\t\tFargate platform\n\t\t\t\tversions in the Amazon Elastic Container Service Developer Guide.

" } }, "role": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

The name or full Amazon Resource Name (ARN) of the IAM role that allows Amazon ECS to make calls to your load balancer on\n\t\t\tyour behalf. This parameter is only permitted if you are using a load balancer with your service and\n\t\t\tyour task definition doesn't use the awsvpc network mode. If you specify the\n\t\t\t\trole parameter, you must also specify a load balancer object with the\n\t\t\t\tloadBalancers parameter.

\n \n

If your account has already created the Amazon ECS service-linked role, that role is used for your\n\t\t\t\tservice unless you specify a role here. The service-linked role is required if your task definition\n\t\t\t\tuses the awsvpc network mode or if the service is configured to use service discovery,\n\t\t\t\tan external deployment controller, multiple target groups, or Elastic Inference accelerators in\n\t\t\t\twhich case you don't specify a role here. For more information, see Using service-linked\n\t\t\t\t\troles for Amazon ECS in the Amazon Elastic Container Service Developer Guide.

\n
\n

If your specified role has a path other than /, then you must either specify the full\n\t\t\trole ARN (this is recommended) or prefix the role name with the path. For example, if a role with the\n\t\t\tname bar has a path of /foo/ then you would specify /foo/bar as\n\t\t\tthe role name. For more information, see Friendly names and\n\t\t\t\tpaths in the IAM User Guide.

" + "smithy.api#documentation": "

The name or full Amazon Resource Name (ARN) of the IAM role that allows Amazon ECS to make calls to your\n\t\t\tload balancer on your behalf. This parameter is only permitted if you are using a load\n\t\t\tbalancer with your service and your task definition doesn't use the awsvpc\n\t\t\tnetwork mode. If you specify the role parameter, you must also specify a\n\t\t\tload balancer object with the loadBalancers parameter.

\n \n

If your account has already created the Amazon ECS service-linked role, that role is\n\t\t\t\tused for your service unless you specify a role here. The service-linked role is\n\t\t\t\trequired if your task definition uses the awsvpc network mode or if the\n\t\t\t\tservice is configured to use service discovery, an external deployment controller,\n\t\t\t\tmultiple target groups, or Elastic Inference accelerators in which case you don't\n\t\t\t\tspecify a role here. For more information, see Using\n\t\t\t\t\tservice-linked roles for Amazon ECS in the Amazon Elastic Container Service Developer Guide.

\n
\n

If your specified role has a path other than /, then you must either\n\t\t\tspecify the full role ARN (this is recommended) or prefix the role name with the path.\n\t\t\tFor example, if a role with the name bar has a path of /foo/\n\t\t\tthen you would specify /foo/bar as the role name. For more information, see\n\t\t\t\tFriendly names and paths in the IAM User\n\t\t\tGuide.

" } }, "deploymentConfiguration": { "target": "com.amazonaws.ecs#DeploymentConfiguration", "traits": { - "smithy.api#documentation": "

Optional deployment parameters that control how many tasks run during the deployment and the ordering\n\t\t\tof stopping and starting tasks.

" + "smithy.api#documentation": "

Optional deployment parameters that control how many tasks run during the deployment\n\t\t\tand the ordering of stopping and starting tasks.

" } }, "placementConstraints": { "target": "com.amazonaws.ecs#PlacementConstraints", "traits": { - "smithy.api#documentation": "

An array of placement constraint objects to use for tasks in your service. You can specify a maximum\n\t\t\tof 10 constraints for each task. This limit includes constraints in the task definition and those\n\t\t\tspecified at runtime.

" + "smithy.api#documentation": "

An array of placement constraint objects to use for tasks in your service. You can\n\t\t\tspecify a maximum of 10 constraints for each task. This limit includes constraints in\n\t\t\tthe task definition and those specified at runtime.

" } }, "placementStrategy": { "target": "com.amazonaws.ecs#PlacementStrategies", "traits": { - "smithy.api#documentation": "

The placement strategy objects to use for tasks in your service. You can specify a maximum of 5\n\t\t\tstrategy rules for each service.

" + "smithy.api#documentation": "

The placement strategy objects to use for tasks in your service. You can specify a\n\t\t\tmaximum of 5 strategy rules for each service.

" } }, "networkConfiguration": { "target": "com.amazonaws.ecs#NetworkConfiguration", "traits": { - "smithy.api#documentation": "

The network configuration for the service. This parameter is required for task definitions that use\n\t\t\tthe awsvpc network mode to receive their own elastic network interface, and it isn't\n\t\t\tsupported for other network modes. For more information, see Task networking in the\n\t\t\tAmazon Elastic Container Service Developer Guide.

" + "smithy.api#documentation": "

The network configuration for the service. This parameter is required for task\n\t\t\tdefinitions that use the awsvpc network mode to receive their own elastic\n\t\t\tnetwork interface, and it isn't supported for other network modes. For more information,\n\t\t\tsee Task networking\n\t\t\tin the Amazon Elastic Container Service Developer Guide.

" } }, "healthCheckGracePeriodSeconds": { "target": "com.amazonaws.ecs#BoxedInteger", "traits": { - "smithy.api#documentation": "

The period of time, in seconds, that the Amazon ECS service scheduler ignores unhealthy Elastic Load Balancing, VPC Lattice, and container \n\t\t\thealth checks after a task has first started. If you don't specify a health check grace\n\t\t\tperiod value, the default value of 0 is used. If you don't use any of the health checks, \n\t\t\tthen healthCheckGracePeriodSeconds is unused.

\n

If your service's tasks take a while to start and respond to health checks, you can specify a\n\t\t\thealth check grace period of up to 2,147,483,647 seconds (about 69 years). During that time, the Amazon ECS\n\t\t\tservice scheduler ignores health check status. This grace period can prevent the service scheduler from\n\t\t\tmarking tasks as unhealthy and stopping them before they have time to come up.

" + "smithy.api#documentation": "

The period of time, in seconds, that the Amazon ECS service scheduler ignores unhealthy\n\t\t\tElastic Load Balancing, VPC Lattice, and container health checks after a task has first started. If you don't\n\t\t\tspecify a health check grace period value, the default value of 0 is used.\n\t\t\tIf you don't use any of the health checks, then\n\t\t\t\thealthCheckGracePeriodSeconds is unused.

\n

If your service's tasks take a while to start and respond to health checks, you can\n\t\t\tspecify a health check grace period of up to 2,147,483,647 seconds (about 69 years).\n\t\t\tDuring that time, the Amazon ECS service scheduler ignores health check status. This grace\n\t\t\tperiod can prevent the service scheduler from marking tasks as unhealthy and stopping\n\t\t\tthem before they have time to come up.

" } }, "schedulingStrategy": { "target": "com.amazonaws.ecs#SchedulingStrategy", "traits": { - "smithy.api#documentation": "

The scheduling strategy to use for the service. For more information, see Services.

\n

There are two service scheduler strategies available:

\n
    \n
  • \n

    \n REPLICA-The replica scheduling strategy places and maintains the desired\n\t\t\t\t\tnumber of tasks across your cluster. By default, the service scheduler spreads tasks across\n\t\t\t\t\tAvailability Zones. You can use task placement strategies and constraints to customize task\n\t\t\t\t\tplacement decisions. This scheduler strategy is required if the service uses the\n\t\t\t\t\t\tCODE_DEPLOY or EXTERNAL deployment controller types.

    \n
  • \n
  • \n

    \n DAEMON-The daemon scheduling strategy deploys exactly one task on each\n\t\t\t\t\tactive container instance that meets all of the task placement constraints that you specify in\n\t\t\t\t\tyour cluster. The service scheduler also evaluates the task placement constraints for running\n\t\t\t\t\ttasks and will stop tasks that don't meet the placement constraints. When you're using this\n\t\t\t\t\tstrategy, you don't need to specify a desired number of tasks, a task placement strategy, or\n\t\t\t\t\tuse Service Auto Scaling policies.

    \n \n

    Tasks using the Fargate launch type or the CODE_DEPLOY or\n\t\t\t\t\t\t\tEXTERNAL deployment controller types don't support the DAEMON\n\t\t\t\t\t\tscheduling strategy.

    \n
    \n
  • \n
" + "smithy.api#documentation": "

The scheduling strategy to use for the service. For more information, see Services.

\n

There are two service scheduler strategies available:

\n
    \n
  • \n

    \n REPLICA-The replica scheduling strategy places and\n\t\t\t\t\tmaintains the desired number of tasks across your cluster. By default, the\n\t\t\t\t\tservice scheduler spreads tasks across Availability Zones. You can use task\n\t\t\t\t\tplacement strategies and constraints to customize task placement decisions. This\n\t\t\t\t\tscheduler strategy is required if the service uses the CODE_DEPLOY\n\t\t\t\t\tor EXTERNAL deployment controller types.

    \n
  • \n
  • \n

    \n DAEMON-The daemon scheduling strategy deploys exactly one\n\t\t\t\t\ttask on each active container instance that meets all of the task placement\n\t\t\t\t\tconstraints that you specify in your cluster. The service scheduler also\n\t\t\t\t\tevaluates the task placement constraints for running tasks and will stop tasks\n\t\t\t\t\tthat don't meet the placement constraints. When you're using this strategy, you\n\t\t\t\t\tdon't need to specify a desired number of tasks, a task placement strategy, or\n\t\t\t\t\tuse Service Auto Scaling policies.

    \n \n

    Tasks using the Fargate launch type or the\n\t\t\t\t\t\t\tCODE_DEPLOY or EXTERNAL deployment controller\n\t\t\t\t\t\ttypes don't support the DAEMON scheduling strategy.

    \n
    \n
  • \n
" } }, "deploymentController": { "target": "com.amazonaws.ecs#DeploymentController", "traits": { - "smithy.api#documentation": "

The deployment controller to use for the service. If no deployment controller is specified, the\n\t\t\tdefault value of ECS is used.

" + "smithy.api#documentation": "

The deployment controller to use for the service. If no deployment controller is\n\t\t\tspecified, the default value of ECS is used.

" } }, "tags": { "target": "com.amazonaws.ecs#Tags", "traits": { - "smithy.api#documentation": "

The metadata that you apply to the service to help you categorize and organize them. Each tag\n\t\t\tconsists of a key and an optional value, both of which you define. When a service is deleted, the tags\n\t\t\tare deleted as well.

\n

The following basic restrictions apply to tags:

\n
    \n
  • \n

    Maximum number of tags per resource - 50

    \n
  • \n
  • \n

    For each resource, each tag key must be unique, and each tag key can have only\n one value.

    \n
  • \n
  • \n

    Maximum key length - 128 Unicode characters in UTF-8

    \n
  • \n
  • \n

    Maximum value length - 256 Unicode characters in UTF-8

    \n
  • \n
  • \n

    If your tagging schema is used across multiple services and resources,\n remember that other services may have restrictions on allowed characters.\n Generally allowed characters are: letters, numbers, and spaces representable in\n UTF-8, and the following characters: + - = . _ : / @.

    \n
  • \n
  • \n

    Tag keys and values are case-sensitive.

    \n
  • \n
  • \n

    Do not use aws:, AWS:, or any upper or lowercase\n combination of such as a prefix for either keys or values as it is reserved for\n Amazon Web Services use. You cannot edit or delete tag keys or values with this prefix. Tags with\n this prefix do not count against your tags per resource limit.

    \n
  • \n
" + "smithy.api#documentation": "

The metadata that you apply to the service to help you categorize and organize them.\n\t\t\tEach tag consists of a key and an optional value, both of which you define. When a\n\t\t\tservice is deleted, the tags are deleted as well.

\n

The following basic restrictions apply to tags:

\n
    \n
  • \n

    Maximum number of tags per resource - 50

    \n
  • \n
  • \n

    For each resource, each tag key must be unique, and each tag key can have only\n one value.

    \n
  • \n
  • \n

    Maximum key length - 128 Unicode characters in UTF-8

    \n
  • \n
  • \n

    Maximum value length - 256 Unicode characters in UTF-8

    \n
  • \n
  • \n

    If your tagging schema is used across multiple services and resources,\n remember that other services may have restrictions on allowed characters.\n Generally allowed characters are: letters, numbers, and spaces representable in\n UTF-8, and the following characters: + - = . _ : / @.

    \n
  • \n
  • \n

    Tag keys and values are case-sensitive.

    \n
  • \n
  • \n

    Do not use aws:, AWS:, or any upper or lowercase\n combination of such as a prefix for either keys or values as it is reserved for\n Amazon Web Services use. You cannot edit or delete tag keys or values with this prefix. Tags with\n this prefix do not count against your tags per resource limit.

    \n
  • \n
" } }, "enableECSManagedTags": { "target": "com.amazonaws.ecs#Boolean", "traits": { "smithy.api#default": false, - "smithy.api#documentation": "

Specifies whether to turn on Amazon ECS managed tags for the tasks within the service. For more\n\t\t\tinformation, see Tagging your Amazon ECS resources in the Amazon Elastic Container Service Developer Guide.

\n

When you use Amazon ECS managed tags, you need to set the propagateTags request\n\t\t\tparameter.

" + "smithy.api#documentation": "

Specifies whether to turn on Amazon ECS managed tags for the tasks within the service. For\n\t\t\tmore information, see Tagging your Amazon ECS\n\t\t\t\tresources in the Amazon Elastic Container Service Developer Guide.

\n

When you use Amazon ECS managed tags, you need to set the propagateTags\n\t\t\trequest parameter.

" } }, "propagateTags": { "target": "com.amazonaws.ecs#PropagateTags", "traits": { - "smithy.api#documentation": "

Specifies whether to propagate the tags from the task definition to the task. If no value is\n\t\t\tspecified, the tags aren't propagated. Tags can only be propagated to the task during task creation. To\n\t\t\tadd tags to a task after task creation, use the TagResource API action.

\n

You must set this to a value other than NONE when you use Cost Explorer. For more\n\t\t\tinformation, see Amazon ECS usage reports in the Amazon Elastic Container Service Developer Guide.

\n

The default is NONE.

" + "smithy.api#documentation": "

Specifies whether to propagate the tags from the task definition to the task. If no\n\t\t\tvalue is specified, the tags aren't propagated. Tags can only be propagated to the task\n\t\t\tduring task creation. To add tags to a task after task creation, use the TagResource API action.

\n

You must set this to a value other than NONE when you use Cost Explorer.\n\t\t\tFor more information, see Amazon ECS usage reports\n\t\t\tin the Amazon Elastic Container Service Developer Guide.

\n

The default is NONE.

" } }, "enableExecuteCommand": { "target": "com.amazonaws.ecs#Boolean", "traits": { "smithy.api#default": false, - "smithy.api#documentation": "

Determines whether the execute command functionality is turned on for the service. If\n\t\t\t\ttrue, this enables execute command functionality on all containers in the service\n\t\t\ttasks.

" + "smithy.api#documentation": "

Determines whether the execute command functionality is turned on for the service. If\n\t\t\t\ttrue, this enables execute command functionality on all containers in\n\t\t\tthe service tasks.

" } }, "serviceConnectConfiguration": { @@ -3463,7 +3463,7 @@ "volumeConfigurations": { "target": "com.amazonaws.ecs#ServiceVolumeConfigurations", "traits": { - "smithy.api#documentation": "

The configuration for a volume specified in the task definition as a volume that is configured at\n\t\t\tlaunch time. Currently, the only supported volume type is an Amazon EBS volume.

" + "smithy.api#documentation": "

The configuration for a volume specified in the task definition as a volume that is\n\t\t\tconfigured at launch time. Currently, the only supported volume type is an Amazon EBS\n\t\t\tvolume.

" } }, "vpcLatticeConfigurations": { @@ -3483,7 +3483,7 @@ "service": { "target": "com.amazonaws.ecs#Service", "traits": { - "smithy.api#documentation": "

The full description of your service following the create call.

\n

A service will return either a capacityProviderStrategy or launchType\n\t\t\tparameter, but not both, depending where one was specified when it was created.

\n

If a service is using the ECS deployment controller, the\n\t\t\t\tdeploymentController and taskSets parameters will not be returned.

\n

if the service uses the CODE_DEPLOY deployment controller, the\n\t\t\t\tdeploymentController, taskSets and deployments parameters\n\t\t\twill be returned, however the deployments parameter will be an empty list.

" + "smithy.api#documentation": "

The full description of your service following the create call.

\n

A service will return either a capacityProviderStrategy or\n\t\t\t\tlaunchType parameter, but not both, depending where one was specified\n\t\t\twhen it was created.

\n

If a service is using the ECS deployment controller, the\n\t\t\t\tdeploymentController and taskSets parameters will not be\n\t\t\treturned.

\n

If the service uses the CODE_DEPLOY deployment controller, the\n\t\t\t\tdeploymentController, taskSets and\n\t\t\t\tdeployments parameters will be returned; however, the\n\t\t\t\tdeployments parameter will be an empty list.

" } } }, @@ -3535,7 +3535,7 @@ } ], "traits": { - "smithy.api#documentation": "

Create a task set in the specified cluster and service. This is used when a service uses the\n\t\t\t\tEXTERNAL deployment controller type. For more information, see Amazon ECS deployment\n\t\t\t\ttypes in the Amazon Elastic Container Service Developer Guide.

\n \n

On March 21, 2024, a change was made to resolve the task definition revision before authorization. When a task definition revision is not specified, authorization will occur using the latest revision of a task definition.

\n
\n

For information about the maximum number of task sets and other quotas, see Amazon ECS service\n\t\t\t\tquotas in the Amazon Elastic Container Service Developer Guide.

" + "smithy.api#documentation": "

Create a task set in the specified cluster and service. This is used when a service\n\t\t\tuses the EXTERNAL deployment controller type. For more information, see\n\t\t\t\tAmazon ECS deployment\n\t\t\t\ttypes in the Amazon Elastic Container Service Developer Guide.

\n \n

On March 21, 2024, a change was made to resolve the task definition revision before authorization. When a task definition revision is not specified, authorization will occur using the latest revision of a task definition.

\n
\n

For information about the maximum number of task sets and other quotas, see Amazon ECS\n\t\t\t\tservice quotas in the Amazon Elastic Container Service Developer Guide.

" } }, "com.amazonaws.ecs#CreateTaskSetRequest": { @@ -3551,20 +3551,20 @@ "cluster": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

The short name or full Amazon Resource Name (ARN) of the cluster that hosts the service to create the task set\n\t\t\tin.

", + "smithy.api#documentation": "

The short name or full Amazon Resource Name (ARN) of the cluster that hosts the service to create the\n\t\t\ttask set in.

", "smithy.api#required": {} } }, "externalId": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

An optional non-unique tag that identifies this task set in external systems. If the task set is\n\t\t\tassociated with a service discovery registry, the tasks in this task set will have the\n\t\t\t\tECS_TASK_SET_EXTERNAL_ID Cloud Map attribute set to the provided\n\t\t\tvalue.

" + "smithy.api#documentation": "

An optional non-unique tag that identifies this task set in external systems. If the\n\t\t\ttask set is associated with a service discovery registry, the tasks in this task set\n\t\t\twill have the ECS_TASK_SET_EXTERNAL_ID Cloud Map attribute set to the provided\n\t\t\tvalue.

" } }, "taskDefinition": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

The task definition for the tasks in the task set to use. If a revision isn't specified, the latest\n\t\t\t\tACTIVE revision is used.

", + "smithy.api#documentation": "

The task definition for the tasks in the task set to use. If a revision isn't\n\t\t\tspecified, the latest ACTIVE revision is used.

", "smithy.api#required": {} } }, @@ -3577,37 +3577,37 @@ "loadBalancers": { "target": "com.amazonaws.ecs#LoadBalancers", "traits": { - "smithy.api#documentation": "

A load balancer object representing the load balancer to use with the task set. The supported load\n\t\t\tbalancer types are either an Application Load Balancer or a Network Load Balancer.

" + "smithy.api#documentation": "

A load balancer object representing the load balancer to use with the task set. The\n\t\t\tsupported load balancer types are either an Application Load Balancer or a Network Load Balancer.

" } }, "serviceRegistries": { "target": "com.amazonaws.ecs#ServiceRegistries", "traits": { - "smithy.api#documentation": "

The details of the service discovery registries to assign to this task set. For more information, see\n\t\t\t\tService\n\t\t\t\tdiscovery.

" + "smithy.api#documentation": "

The details of the service discovery registries to assign to this task set. For more\n\t\t\tinformation, see Service\n\t\t\t\tdiscovery.

" } }, "launchType": { "target": "com.amazonaws.ecs#LaunchType", "traits": { - "smithy.api#documentation": "

The launch type that new tasks in the task set uses. For more information, see Amazon ECS launch\n\t\t\t\ttypes in the Amazon Elastic Container Service Developer Guide.

\n

If a launchType is specified, the capacityProviderStrategy parameter must\n\t\t\tbe omitted.

" + "smithy.api#documentation": "

The launch type that new tasks in the task set uses. For more information, see Amazon ECS\n\t\t\t\tlaunch types in the Amazon Elastic Container Service Developer Guide.

\n

If a launchType is specified, the capacityProviderStrategy\n\t\t\tparameter must be omitted.

" } }, "capacityProviderStrategy": { "target": "com.amazonaws.ecs#CapacityProviderStrategy", "traits": { - "smithy.api#documentation": "

The capacity provider strategy to use for the task set.

\n

A capacity provider strategy consists of one or more capacity providers along with the\n\t\t\t\tbase and weight to assign to them. A capacity provider must be associated\n\t\t\twith the cluster to be used in a capacity provider strategy. The PutClusterCapacityProviders API is used to associate a capacity provider with a cluster.\n\t\t\tOnly capacity providers with an ACTIVE or UPDATING status can be used.

\n

If a capacityProviderStrategy is specified, the launchType parameter must\n\t\t\tbe omitted. If no capacityProviderStrategy or launchType is specified, the\n\t\t\t\tdefaultCapacityProviderStrategy for the cluster is used.

\n

If specifying a capacity provider that uses an Auto Scaling group, the capacity provider must already\n\t\t\tbe created. New capacity providers can be created with the CreateCapacityProviderProviderAPI operation.

\n

To use a Fargate capacity provider, specify either the FARGATE or\n\t\t\t\tFARGATE_SPOT capacity providers. The Fargate capacity providers are available to all\n\t\t\taccounts and only need to be associated with a cluster to be used.

\n

The PutClusterCapacityProviders API operation is used to update the list of available capacity\n\t\t\tproviders for a cluster after the cluster is created.

" + "smithy.api#documentation": "

The capacity provider strategy to use for the task set.

\n

A capacity provider strategy consists of one or more capacity providers along with the\n\t\t\t\tbase and weight to assign to them. A capacity provider\n\t\t\tmust be associated with the cluster to be used in a capacity provider strategy. The\n\t\t\t\tPutClusterCapacityProviders API is used to associate a capacity provider\n\t\t\twith a cluster. Only capacity providers with an ACTIVE or\n\t\t\t\tUPDATING status can be used.

\n

If a capacityProviderStrategy is specified, the launchType\n\t\t\tparameter must be omitted. If no capacityProviderStrategy or\n\t\t\t\tlaunchType is specified, the\n\t\t\t\tdefaultCapacityProviderStrategy for the cluster is used.

\n

If specifying a capacity provider that uses an Auto Scaling group, the capacity\n\t\t\tprovider must already be created. New capacity providers can be created with the CreateCapacityProvider API operation.

\n

To use a Fargate capacity provider, specify either the FARGATE or\n\t\t\t\tFARGATE_SPOT capacity providers. The Fargate capacity providers are\n\t\t\tavailable to all accounts and only need to be associated with a cluster to be\n\t\t\tused.

\n

The PutClusterCapacityProviders API operation is used to update the list of\n\t\t\tavailable capacity providers for a cluster after the cluster is created.

" } }, "platformVersion": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

The platform version that the tasks in the task set uses. A platform version is specified only for\n\t\t\ttasks using the Fargate launch type. If one isn't specified, the LATEST\n\t\t\tplatform version is used.

" + "smithy.api#documentation": "

The platform version that the tasks in the task set uses. A platform version is\n\t\t\tspecified only for tasks using the Fargate launch type. If one isn't\n\t\t\tspecified, the LATEST platform version is used.

" } }, "scale": { "target": "com.amazonaws.ecs#Scale", "traits": { - "smithy.api#documentation": "

A floating-point percentage of the desired number of tasks to place and keep running in the task\n\t\t\tset.

" + "smithy.api#documentation": "

A floating-point percentage of the desired number of tasks to place and keep running\n\t\t\tin the task set.

" } }, "clientToken": { @@ -3619,7 +3619,7 @@ "tags": { "target": "com.amazonaws.ecs#Tags", "traits": { - "smithy.api#documentation": "

The metadata that you apply to the task set to help you categorize and organize them. Each tag\n\t\t\tconsists of a key and an optional value. You define both. When a service is deleted, the tags are\n\t\t\tdeleted.

\n

The following basic restrictions apply to tags:

\n
    \n
  • \n

    Maximum number of tags per resource - 50

    \n
  • \n
  • \n

    For each resource, each tag key must be unique, and each tag key can have only\n one value.

    \n
  • \n
  • \n

    Maximum key length - 128 Unicode characters in UTF-8

    \n
  • \n
  • \n

    Maximum value length - 256 Unicode characters in UTF-8

    \n
  • \n
  • \n

    If your tagging schema is used across multiple services and resources,\n remember that other services may have restrictions on allowed characters.\n Generally allowed characters are: letters, numbers, and spaces representable in\n UTF-8, and the following characters: + - = . _ : / @.

    \n
  • \n
  • \n

    Tag keys and values are case-sensitive.

    \n
  • \n
  • \n

    Do not use aws:, AWS:, or any upper or lowercase\n combination of such as a prefix for either keys or values as it is reserved for\n Amazon Web Services use. You cannot edit or delete tag keys or values with this prefix. Tags with\n this prefix do not count against your tags per resource limit.

    \n
  • \n
" + "smithy.api#documentation": "

The metadata that you apply to the task set to help you categorize and organize them.\n\t\t\tEach tag consists of a key and an optional value. You define both. When a service is\n\t\t\tdeleted, the tags are deleted.

\n

The following basic restrictions apply to tags:

\n
    \n
  • \n

    Maximum number of tags per resource - 50

    \n
  • \n
  • \n

    For each resource, each tag key must be unique, and each tag key can have only\n one value.

    \n
  • \n
  • \n

    Maximum key length - 128 Unicode characters in UTF-8

    \n
  • \n
  • \n

    Maximum value length - 256 Unicode characters in UTF-8

    \n
  • \n
  • \n

    If your tagging schema is used across multiple services and resources,\n remember that other services may have restrictions on allowed characters.\n Generally allowed characters are: letters, numbers, and spaces representable in\n UTF-8, and the following characters: + - = . _ : / @.

    \n
  • \n
  • \n

    Tag keys and values are case-sensitive.

    \n
  • \n
  • \n

    Do not use aws:, AWS:, or any upper or lowercase\n combination of such as a prefix for either keys or values as it is reserved for\n Amazon Web Services use. You cannot edit or delete tag keys or values with this prefix. Tags with\n this prefix do not count against your tags per resource limit.

    \n
  • \n
" } } }, @@ -3633,7 +3633,7 @@ "taskSet": { "target": "com.amazonaws.ecs#TaskSet", "traits": { - "smithy.api#documentation": "

Information about a set of Amazon ECS tasks in either an CodeDeploy or an EXTERNAL deployment.\n\t\t\tA task set includes details such as the desired number of tasks, how many tasks are running, and\n\t\t\twhether the task set serves production traffic.

" + "smithy.api#documentation": "

Information about a set of Amazon ECS tasks in either a CodeDeploy or an\n\t\t\t\tEXTERNAL deployment. A task set includes details such as the desired\n\t\t\tnumber of tasks, how many tasks are running, and whether the task set serves production\n\t\t\ttraffic.

" } } }, @@ -3647,18 +3647,18 @@ "before": { "target": "com.amazonaws.ecs#Timestamp", "traits": { - "smithy.api#documentation": "

Include service deployments in the result that were created before this time. The format is yyyy-MM-dd\n\t\t\tHH:mm:ss.SSSSSS.

" + "smithy.api#documentation": "

Include service deployments in the result that were created before this time. The\n\t\t\tformat is yyyy-MM-dd HH:mm:ss.SSSSSS.

" } }, "after": { "target": "com.amazonaws.ecs#Timestamp", "traits": { - "smithy.api#documentation": "

Include service deployments in the result that were created after this time. The format is yyyy-MM-dd\n\t\t\tHH:mm:ss.SSSSSS.

" + "smithy.api#documentation": "

Include service deployments in the result that were created after this time. The\n\t\t\tformat is yyyy-MM-dd HH:mm:ss.SSSSSS.

" } } }, "traits": { - "smithy.api#documentation": "

The optional filter to narrow the ListServiceDeployment results.

\n

If you do not specify a value, service deployments that were created before the current\n\t\t\ttime are included in the result.

" + "smithy.api#documentation": "

The optional filter to narrow the ListServiceDeployment results.

\n

If you do not specify a value, service deployments that were created before the\n\t\t\tcurrent time are included in the result.

" } }, "com.amazonaws.ecs#DeleteAccountSetting": { @@ -3681,7 +3681,7 @@ } ], "traits": { - "smithy.api#documentation": "

Disables an account setting for a specified user, role, or the root user for an account.

", + "smithy.api#documentation": "

Disables an account setting for a specified user, role, or the root user for an\n\t\t\taccount.

", "smithy.api#examples": [ { "title": "To delete the account settings for a specific IAM user or IAM role", @@ -3721,14 +3721,14 @@ "name": { "target": "com.amazonaws.ecs#SettingName", "traits": { - "smithy.api#documentation": "

The resource name to disable the account setting for. If serviceLongArnFormat is\n\t\t\tspecified, the ARN for your Amazon ECS services is affected. If taskLongArnFormat is\n\t\t\tspecified, the ARN and resource ID for your Amazon ECS tasks is affected. If\n\t\t\t\tcontainerInstanceLongArnFormat is specified, the ARN and resource ID for your Amazon ECS\n\t\t\tcontainer instances is affected. If awsvpcTrunking is specified, the ENI limit for your\n\t\t\tAmazon ECS container instances is affected.

", + "smithy.api#documentation": "

The resource name to disable the account setting for. If\n\t\t\t\tserviceLongArnFormat is specified, the ARN for your Amazon ECS services is\n\t\t\taffected. If taskLongArnFormat is specified, the ARN and resource ID for\n\t\t\tyour Amazon ECS tasks is affected. If containerInstanceLongArnFormat is\n\t\t\tspecified, the ARN and resource ID for your Amazon ECS container instances is affected. If\n\t\t\t\tawsvpcTrunking is specified, the ENI limit for your Amazon ECS container\n\t\t\tinstances is affected.

", "smithy.api#required": {} } }, "principalArn": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the principal. It can be an user, role, or the root user. If you\n\t\t\tspecify the root user, it disables the account setting for all users, roles, and the root user of the account\n\t\t\tunless a user or role explicitly overrides these settings. If this field is omitted, the setting is\n\t\t\tchanged only for the authenticated user.

" + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the principal. It can be a user, role, or the\n\t\t\troot user. If you specify the root user, it disables the account setting for all users, roles,\n\t\t\tand the root user of the account unless a user or role explicitly overrides these settings.\n\t\t\tIf this field is omitted, the setting is changed only for the authenticated user.

" } } }, @@ -3779,13 +3779,13 @@ "cluster": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

The short name or full Amazon Resource Name (ARN) of the cluster that contains the resource to delete attributes.\n\t\t\tIf you do not specify a cluster, the default cluster is assumed.

" + "smithy.api#documentation": "

The short name or full Amazon Resource Name (ARN) of the cluster that contains the resource to delete\n\t\t\tattributes. If you do not specify a cluster, the default cluster is assumed.

" } }, "attributes": { "target": "com.amazonaws.ecs#Attributes", "traits": { - "smithy.api#documentation": "

The attributes to delete from your resource. You can specify up to 10 attributes for each request.\n\t\t\tFor custom attributes, specify the attribute name and target ID, but don't specify the value. If you\n\t\t\tspecify the target ID using the short form, you must also specify the target type.

", + "smithy.api#documentation": "

The attributes to delete from your resource. You can specify up to 10 attributes for\n\t\t\teach request. For custom attributes, specify the attribute name and target ID, but don't\n\t\t\tspecify the value. If you specify the target ID using the short form, you must also\n\t\t\tspecify the target type.

", "smithy.api#required": {} } } @@ -3828,7 +3828,7 @@ } ], "traits": { - "smithy.api#documentation": "

Deletes the specified capacity provider.

\n \n

The FARGATE and FARGATE_SPOT capacity providers are reserved and can't\n\t\t\t\tbe deleted. You can disassociate them from a cluster using either PutClusterCapacityProviders or by deleting the cluster.

\n
\n

Prior to a capacity provider being deleted, the capacity provider must be removed from the capacity\n\t\t\tprovider strategy from all services. The UpdateService API can be used to\n\t\t\tremove a capacity provider from a service's capacity provider strategy. When updating a service, the\n\t\t\t\tforceNewDeployment option can be used to ensure that any tasks using the Amazon EC2\n\t\t\tinstance capacity provided by the capacity provider are transitioned to use the capacity from the\n\t\t\tremaining capacity providers. Only capacity providers that aren't associated with a cluster can be\n\t\t\tdeleted. To remove a capacity provider from a cluster, you can either use PutClusterCapacityProviders or delete the cluster.

" + "smithy.api#documentation": "

Deletes the specified capacity provider.

\n \n

The FARGATE and FARGATE_SPOT capacity providers are\n\t\t\t\treserved and can't be deleted. You can disassociate them from a cluster using either\n\t\t\t\t\tPutClusterCapacityProviders or by deleting the cluster.

\n
\n

Prior to a capacity provider being deleted, the capacity provider must be removed from\n\t\t\tthe capacity provider strategy from all services. The UpdateService API\n\t\t\tcan be used to remove a capacity provider from a service's capacity provider strategy.\n\t\t\tWhen updating a service, the forceNewDeployment option can be used to\n\t\t\tensure that any tasks using the Amazon EC2 instance capacity provided by the capacity\n\t\t\tprovider are transitioned to use the capacity from the remaining capacity providers.\n\t\t\tOnly capacity providers that aren't associated with a cluster can be deleted. To remove\n\t\t\ta capacity provider from a cluster, you can either use PutClusterCapacityProviders or delete the cluster.

" } }, "com.amazonaws.ecs#DeleteCapacityProviderRequest": { @@ -3895,7 +3895,7 @@ } ], "traits": { - "smithy.api#documentation": "

Deletes the specified cluster. The cluster transitions to the INACTIVE state. Clusters\n\t\t\twith an INACTIVE status might remain discoverable in your account for a period of time.\n\t\t\tHowever, this behavior is subject to change in the future. We don't recommend that you rely on\n\t\t\t\tINACTIVE clusters persisting.

\n

You must deregister all container instances from this cluster before you may delete it. You can list\n\t\t\tthe container instances in a cluster with ListContainerInstances\n\t\t\tand deregister them with DeregisterContainerInstance.

", + "smithy.api#documentation": "

Deletes the specified cluster. The cluster transitions to the INACTIVE\n\t\t\tstate. Clusters with an INACTIVE status might remain discoverable in your\n\t\t\taccount for a period of time. However, this behavior is subject to change in the future.\n\t\t\tWe don't recommend that you rely on INACTIVE clusters persisting.

\n

You must deregister all container instances from this cluster before you may delete\n\t\t\tit. You can list the container instances in a cluster with ListContainerInstances and deregister them with DeregisterContainerInstance.

", "smithy.api#examples": [ { "title": "To delete an empty cluster", @@ -3973,7 +3973,7 @@ } ], "traits": { - "smithy.api#documentation": "

Deletes a specified service within a cluster. You can delete a service if you have no running tasks\n\t\t\tin it and the desired task count is zero. If the service is actively maintaining tasks, you can't\n\t\t\tdelete it, and you must update the service to a desired task count of zero. For more information, see\n\t\t\t\tUpdateService.

\n \n

When you delete a service, if there are still running tasks that require cleanup, the service\n\t\t\t\tstatus moves from ACTIVE to DRAINING, and the service is no longer\n\t\t\t\tvisible in the console or in the ListServices API operation.\n\t\t\t\tAfter all tasks have transitioned to either STOPPING or STOPPED status,\n\t\t\t\tthe service status moves from DRAINING to INACTIVE. Services in the\n\t\t\t\t\tDRAINING or INACTIVE status can still be viewed with the DescribeServices API operation. However, in the future, INACTIVE services\n\t\t\t\tmay be cleaned up and purged from Amazon ECS record keeping, and DescribeServices calls on\n\t\t\t\tthose services return a ServiceNotFoundException error.

\n
\n \n

If you attempt to create a new service with the same name as an existing service in either\n\t\t\t\t\tACTIVE or DRAINING status, you receive an error.

\n
", + "smithy.api#documentation": "

Deletes a specified service within a cluster. You can delete a service if you have no\n\t\t\trunning tasks in it and the desired task count is zero. If the service is actively\n\t\t\tmaintaining tasks, you can't delete it, and you must update the service to a desired\n\t\t\ttask count of zero. For more information, see UpdateService.

\n \n

When you delete a service, if there are still running tasks that require cleanup,\n\t\t\t\tthe service status moves from ACTIVE to DRAINING, and the\n\t\t\t\tservice is no longer visible in the console or in the ListServices\n\t\t\t\tAPI operation. After all tasks have transitioned to either STOPPING or\n\t\t\t\t\tSTOPPED status, the service status moves from DRAINING\n\t\t\t\tto INACTIVE. Services in the DRAINING or\n\t\t\t\t\tINACTIVE status can still be viewed with the DescribeServices API operation. However, in the future,\n\t\t\t\t\tINACTIVE services may be cleaned up and purged from Amazon ECS record\n\t\t\t\tkeeping, and DescribeServices calls on those services return a\n\t\t\t\t\tServiceNotFoundException error.

\n
\n \n

If you attempt to create a new service with the same name as an existing service\n\t\t\t\tin either ACTIVE or DRAINING status, you receive an\n\t\t\t\terror.

\n
", "smithy.api#examples": [ { "title": "To delete a service", @@ -4005,7 +4005,7 @@ "force": { "target": "com.amazonaws.ecs#BoxedBoolean", "traits": { - "smithy.api#documentation": "

If true, allows you to delete a service even if it wasn't scaled down to zero tasks.\n\t\t\tIt's only necessary to use this if the service uses the REPLICA scheduling\n\t\t\tstrategy.

" + "smithy.api#documentation": "

If true, allows you to delete a service even if it wasn't scaled down to\n\t\t\tzero tasks. It's only necessary to use this if the service uses the REPLICA\n\t\t\tscheduling strategy.

" } } }, @@ -4050,7 +4050,7 @@ } ], "traits": { - "smithy.api#documentation": "

Deletes one or more task definitions.

\n

You must deregister a task definition revision before you delete it. For more information, see DeregisterTaskDefinition.

\n

When you delete a task definition revision, it is immediately transitions from the\n\t\t\t\tINACTIVE to DELETE_IN_PROGRESS. Existing tasks and services that\n\t\t\treference a DELETE_IN_PROGRESS task definition revision continue to run without\n\t\t\tdisruption. Existing services that reference a DELETE_IN_PROGRESS task definition revision\n\t\t\tcan still scale up or down by modifying the service's desired count.

\n

You can't use a DELETE_IN_PROGRESS task definition revision to run new tasks or create\n\t\t\tnew services. You also can't update an existing service to reference a DELETE_IN_PROGRESS\n\t\t\ttask definition revision.

\n

A task definition revision will stay in DELETE_IN_PROGRESS status until all the\n\t\t\tassociated tasks and services have been terminated.

\n

When you delete all INACTIVE task definition revisions, the task definition name is not\n\t\t\tdisplayed in the console and not returned in the API. If a task definition revisions are in the\n\t\t\t\tDELETE_IN_PROGRESS state, the task definition name is displayed in the console and\n\t\t\treturned in the API. The task definition name is retained by Amazon ECS and the revision is incremented the\n\t\t\tnext time you create a task definition with that name.

" + "smithy.api#documentation": "

Deletes one or more task definitions.

\n

You must deregister a task definition revision before you delete it. For more\n\t\t\tinformation, see DeregisterTaskDefinition.

\n

When you delete a task definition revision, it immediately transitions from the\n\t\t\t\tINACTIVE to DELETE_IN_PROGRESS. Existing tasks and\n\t\t\tservices that reference a DELETE_IN_PROGRESS task definition revision\n\t\t\tcontinue to run without disruption. Existing services that reference a\n\t\t\t\tDELETE_IN_PROGRESS task definition revision can still scale up or down\n\t\t\tby modifying the service's desired count.

\n

You can't use a DELETE_IN_PROGRESS task definition revision to run new\n\t\t\ttasks or create new services. You also can't update an existing service to reference a\n\t\t\t\tDELETE_IN_PROGRESS task definition revision.

\n

A task definition revision will stay in DELETE_IN_PROGRESS status until\n\t\t\tall the associated tasks and services have been terminated.

\n

When you delete all INACTIVE task definition revisions, the task\n\t\t\tdefinition name is not displayed in the console and not returned in the API. If task\n\t\t\tdefinition revisions are in the DELETE_IN_PROGRESS state, the task\n\t\t\tdefinition name is displayed in the console and returned in the API. The task definition\n\t\t\tname is retained by Amazon ECS and the revision is incremented the next time you create a\n\t\t\ttask definition with that name.

" } }, "com.amazonaws.ecs#DeleteTaskDefinitionsRequest": { @@ -4059,7 +4059,7 @@ "taskDefinitions": { "target": "com.amazonaws.ecs#StringList", "traits": { - "smithy.api#documentation": "

The family and revision (family:revision) or full Amazon Resource Name (ARN) of\n\t\t\tthe task definition to delete. You must specify a revision.

\n

You can specify up to 10 task definitions as a comma separated list.

", + "smithy.api#documentation": "

The family and revision (family:revision) or\n\t\t\tfull Amazon Resource Name (ARN) of the task definition to delete. You must specify a\n\t\t\t\trevision.

\n

You can specify up to 10 task definitions as a comma separated list.

", "smithy.api#required": {} } } @@ -4126,7 +4126,7 @@ } ], "traits": { - "smithy.api#documentation": "

Deletes a specified task set within a service. This is used when a service uses the\n\t\t\t\tEXTERNAL deployment controller type. For more information, see Amazon ECS deployment\n\t\t\t\ttypes in the Amazon Elastic Container Service Developer Guide.

" + "smithy.api#documentation": "

Deletes a specified task set within a service. This is used when a service uses the\n\t\t\t\tEXTERNAL deployment controller type. For more information, see Amazon ECS deployment types in the Amazon Elastic Container Service Developer Guide.

" } }, "com.amazonaws.ecs#DeleteTaskSetRequest": { @@ -4135,14 +4135,14 @@ "cluster": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

The short name or full Amazon Resource Name (ARN) of the cluster that hosts the service that the task set found in to\n\t\t\tdelete.

", + "smithy.api#documentation": "

The short name or full Amazon Resource Name (ARN) of the cluster that hosts the service that the task\n\t\t\tset is found in to delete.

", "smithy.api#required": {} } }, "service": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

The short name or full Amazon Resource Name (ARN) of the service that hosts the task set to delete.

", + "smithy.api#documentation": "

The short name or full Amazon Resource Name (ARN) of the service that hosts the task set to\n\t\t\tdelete.

", "smithy.api#required": {} } }, @@ -4156,7 +4156,7 @@ "force": { "target": "com.amazonaws.ecs#BoxedBoolean", "traits": { - "smithy.api#documentation": "

If true, you can delete a task set even if it hasn't been scaled down to zero.

" + "smithy.api#documentation": "

If true, you can delete a task set even if it hasn't been scaled down to\n\t\t\tzero.

" } } }, @@ -4190,13 +4190,13 @@ "status": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

The status of the deployment. The following describes each state.

\n
\n
PRIMARY
\n
\n

The most recent deployment of a service.

\n
\n
ACTIVE
\n
\n

A service deployment that still has running tasks, but are in the process of being\n\t\t\t\t\t\treplaced with a new PRIMARY deployment.

\n
\n
INACTIVE
\n
\n

A deployment that has been completely replaced.

\n
\n
" + "smithy.api#documentation": "

The status of the deployment. The following describes each state.

\n
\n
PRIMARY
\n
\n

The most recent deployment of a service.

\n
\n
ACTIVE
\n
\n

A service deployment that still has running tasks, but are in the process\n\t\t\t\t\t\tof being replaced with a new PRIMARY deployment.

\n
\n
INACTIVE
\n
\n

A deployment that has been completely replaced.

\n
\n
" } }, "taskDefinition": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

The most recent task definition that was specified for the tasks in the service to use.

" + "smithy.api#documentation": "

The most recent task definition that was specified for the tasks in the service to\n\t\t\tuse.

" } }, "desiredCount": { @@ -4210,21 +4210,21 @@ "target": "com.amazonaws.ecs#Integer", "traits": { "smithy.api#default": 0, - "smithy.api#documentation": "

The number of tasks in the deployment that are in the PENDING status.

" + "smithy.api#documentation": "

The number of tasks in the deployment that are in the PENDING\n\t\t\tstatus.

" } }, "runningCount": { "target": "com.amazonaws.ecs#Integer", "traits": { "smithy.api#default": 0, - "smithy.api#documentation": "

The number of tasks in the deployment that are in the RUNNING status.

" + "smithy.api#documentation": "

The number of tasks in the deployment that are in the RUNNING\n\t\t\tstatus.

" } }, "failedTasks": { "target": "com.amazonaws.ecs#Integer", "traits": { "smithy.api#default": 0, - "smithy.api#documentation": "

The number of consecutively failed tasks in the deployment. A task is considered a failure if the\n\t\t\tservice scheduler can't launch the task, the task doesn't transition to a RUNNING state,\n\t\t\tor if it fails any of its defined health checks and is stopped.

\n \n

Once a service deployment has one or more successfully running tasks, the failed task count\n\t\t\t\tresets to zero and stops being evaluated.

\n
" + "smithy.api#documentation": "

The number of consecutively failed tasks in the deployment. A task is considered a\n\t\t\tfailure if the service scheduler can't launch the task, the task doesn't transition to a\n\t\t\t\tRUNNING state, or if it fails any of its defined health checks and is\n\t\t\tstopped.

\n \n

Once a service deployment has one or more successfully running tasks, the failed\n\t\t\t\ttask count resets to zero and stops being evaluated.

\n
" } }, "createdAt": { @@ -4248,19 +4248,19 @@ "launchType": { "target": "com.amazonaws.ecs#LaunchType", "traits": { - "smithy.api#documentation": "

The launch type the tasks in the service are using. For more information, see Amazon ECS Launch\n\t\t\t\tTypes in the Amazon Elastic Container Service Developer Guide.

" + "smithy.api#documentation": "

The launch type the tasks in the service are using. For more information, see Amazon ECS\n\t\t\t\tLaunch Types in the Amazon Elastic Container Service Developer Guide.

" } }, "platformVersion": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

The platform version that your tasks in the service run on. A platform version is only specified for\n\t\t\ttasks using the Fargate launch type. If one isn't specified, the LATEST\n\t\t\tplatform version is used. For more information, see Fargate Platform Versions in\n\t\t\tthe Amazon Elastic Container Service Developer Guide.

" + "smithy.api#documentation": "

The platform version that your tasks in the service run on. A platform version is only\n\t\t\tspecified for tasks using the Fargate launch type. If one isn't specified,\n\t\t\tthe LATEST platform version is used. For more information, see Fargate Platform Versions in the Amazon Elastic Container Service Developer Guide.

" } }, "platformFamily": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

The operating system that your tasks in the service, or tasks are running on. A platform family is\n\t\t\tspecified only for tasks using the Fargate launch type.

\n

All tasks that run as part of this service must use the same platformFamily value as\n\t\t\tthe service, for example, LINUX.

" + "smithy.api#documentation": "

The operating system that your tasks in the service, or tasks are running on. A\n\t\t\tplatform family is specified only for tasks using the Fargate launch type.

\n

All tasks that run as part of this service must use the same\n\t\t\t\tplatformFamily value as the service, for example, \n\t\t\tLINUX.

" } }, "networkConfiguration": { @@ -4272,7 +4272,7 @@ "rolloutState": { "target": "com.amazonaws.ecs#DeploymentRolloutState", "traits": { - "smithy.api#documentation": "\n

The rolloutState of a service is only returned for services that use the rolling\n\t\t\t\tupdate (ECS) deployment type that aren't behind a Classic Load Balancer.

\n
\n

The rollout state of the deployment. When a service deployment is started, it begins in an\n\t\t\t\tIN_PROGRESS state. When the service reaches a steady state, the deployment transitions\n\t\t\tto a COMPLETED state. If the service fails to reach a steady state and circuit breaker is\n\t\t\tturned on, the deployment transitions to a FAILED state. A deployment in\n\t\t\t\tFAILED state doesn't launch any new tasks. For more information, see DeploymentCircuitBreaker.

" + "smithy.api#documentation": "\n

The rolloutState of a service is only returned for services that use\n\t\t\t\tthe rolling update (ECS) deployment type that aren't behind a\n\t\t\t\tClassic Load Balancer.

\n
\n

The rollout state of the deployment. When a service deployment is started, it begins\n\t\t\tin an IN_PROGRESS state. When the service reaches a steady state, the\n\t\t\tdeployment transitions to a COMPLETED state. If the service fails to reach\n\t\t\ta steady state and circuit breaker is turned on, the deployment transitions to a\n\t\t\t\tFAILED state. A deployment in FAILED state doesn't launch\n\t\t\tany new tasks. For more information, see DeploymentCircuitBreaker.

" } }, "rolloutStateReason": { @@ -4284,19 +4284,19 @@ "serviceConnectConfiguration": { "target": "com.amazonaws.ecs#ServiceConnectConfiguration", "traits": { - "smithy.api#documentation": "

The details of the Service Connect configuration that's used by this deployment. Compare the\n\t\t\tconfiguration between multiple deployments when troubleshooting issues with new deployments.

\n

The configuration for this service to discover and connect to\n\tservices, and be discovered by, and connected from, other services within a namespace.

\n

Tasks that run in a namespace can use short names to connect\n\tto services in the namespace. Tasks can connect to services across all of the clusters in the namespace.\n\tTasks connect through a managed proxy container\n\tthat collects logs and metrics for increased visibility.\n\tOnly the tasks that Amazon ECS services create are supported with Service Connect.\n\tFor more information, see Service Connect in the Amazon Elastic Container Service Developer Guide.

" + "smithy.api#documentation": "

The details of the Service Connect configuration that's used by this deployment.\n\t\t\tCompare the configuration between multiple deployments when troubleshooting issues with\n\t\t\tnew deployments.

\n

The configuration for this service to discover and connect to\n\tservices, and be discovered by, and connected from, other services within a namespace.

\n

Tasks that run in a namespace can use short names to connect\n\tto services in the namespace. Tasks can connect to services across all of the clusters in the namespace.\n\tTasks connect through a managed proxy container\n\tthat collects logs and metrics for increased visibility.\n\tOnly the tasks that Amazon ECS services create are supported with Service Connect.\n\tFor more information, see Service Connect in the Amazon Elastic Container Service Developer Guide.

" } }, "serviceConnectResources": { "target": "com.amazonaws.ecs#ServiceConnectServiceResourceList", "traits": { - "smithy.api#documentation": "

The list of Service Connect resources that are associated with this deployment. Each list entry maps\n\t\t\ta discovery name to a Cloud Map service name.

" + "smithy.api#documentation": "

The list of Service Connect resources that are associated with this deployment. Each\n\t\t\tlist entry maps a discovery name to a Cloud Map service name.

" } }, "volumeConfigurations": { "target": "com.amazonaws.ecs#ServiceVolumeConfigurations", "traits": { - "smithy.api#documentation": "

The details of the volume that was configuredAtLaunch. You can configure different\n\t\t\tsettings like the size, throughput, volumeType, and encryption in ServiceManagedEBSVolumeConfiguration. The name of the volume must match the\n\t\t\t\tname from the task definition.

" + "smithy.api#documentation": "

The details of the volume that was configuredAtLaunch. You can configure\n\t\t\tdifferent settings like the size, throughput, volumeType, and encryption in ServiceManagedEBSVolumeConfiguration. The name of the volume\n\t\t\tmust match the name from the task definition.

" } }, "fargateEphemeralStorage": { @@ -4330,7 +4330,7 @@ "target": "com.amazonaws.ecs#Boolean", "traits": { "smithy.api#default": false, - "smithy.api#documentation": "

Determines whether to configure Amazon ECS to roll back the service if a service deployment fails. If\n\t\t\trollback is used, when a service deployment fails, the service is rolled back to the last deployment\n\t\t\tthat completed successfully.

", + "smithy.api#documentation": "

Determines whether to configure Amazon ECS to roll back the service if a service deployment\n\t\t\tfails. If rollback is used, when a service deployment fails, the service is rolled back\n\t\t\tto the last deployment that completed successfully.

", "smithy.api#required": {} } }, @@ -4338,13 +4338,13 @@ "target": "com.amazonaws.ecs#Boolean", "traits": { "smithy.api#default": false, - "smithy.api#documentation": "

Determines whether to use the CloudWatch alarm option in the service deployment process.

", + "smithy.api#documentation": "

Determines whether to use the CloudWatch alarm option in the service deployment\n\t\t\tprocess.

", "smithy.api#required": {} } } }, "traits": { - "smithy.api#documentation": "

One of the methods which provide a way for you to quickly identify when a deployment has failed, and\n\t\t\tthen to optionally roll back the failure to the last working deployment.

\n

When the alarms are generated, Amazon ECS sets the service deployment to failed. Set the rollback\n\t\t\tparameter to have Amazon ECS roll back your service to the last completed deployment after a\n\t\t\tfailure.

\n

You can only use the DeploymentAlarms method to detect failures when the\n\t\t\t\tDeploymentController is set to ECS (rolling update).

\n

For more information, see Rolling update in the\n\t\t\t\t\n Amazon Elastic Container Service Developer Guide\n .

" + "smithy.api#documentation": "

One of the methods which provide a way for you to quickly identify when a deployment\n\t\t\thas failed, and then to optionally roll back the failure to the last working\n\t\t\tdeployment.

\n

When the alarms are generated, Amazon ECS sets the service deployment to failed. Set the\n\t\t\trollback parameter to have Amazon ECS roll back your service to the last completed\n\t\t\tdeployment after a failure.

\n

You can only use the DeploymentAlarms method to detect failures when the\n\t\t\t\tDeploymentController is set to ECS (rolling\n\t\t\tupdate).

\n

For more information, see Rolling\n\t\t\t\tupdate in the \n Amazon Elastic Container Service Developer Guide\n .

" } }, "com.amazonaws.ecs#DeploymentCircuitBreaker": { @@ -4362,13 +4362,13 @@ "target": "com.amazonaws.ecs#Boolean", "traits": { "smithy.api#default": false, - "smithy.api#documentation": "

Determines whether to configure Amazon ECS to roll back the service if a service deployment fails. If\n\t\t\trollback is on, when a service deployment fails, the service is rolled back to the last deployment that\n\t\t\tcompleted successfully.

", + "smithy.api#documentation": "

Determines whether to configure Amazon ECS to roll back the service if a service deployment\n\t\t\tfails. If rollback is on, when a service deployment fails, the service is rolled back to\n\t\t\tthe last deployment that completed successfully.

", "smithy.api#required": {} } } }, "traits": { - "smithy.api#documentation": "\n

The deployment circuit breaker can only be used for services using the rolling update\n\t\t\t\t\t(ECS) deployment type.

\n
\n

The deployment circuit breaker determines whether a service\n\t\t\tdeployment will fail if the service can't reach a steady state. If it is turned on, a service\n\t\t\tdeployment will transition to a failed state and stop launching new tasks. You can also configure Amazon ECS\n\t\t\tto roll back your service to the last completed deployment after a failure. For more information, see\n\t\t\t\tRolling\n\t\t\t\tupdate in the Amazon Elastic Container Service Developer Guide.

\n

For more information about API failure reasons, see API failure reasons in the\n\t\t\tAmazon Elastic Container Service Developer Guide.

" + "smithy.api#documentation": "\n

The deployment circuit breaker can only be used for services using the rolling\n\t\t\t\tupdate (ECS) deployment type.

\n
\n

The deployment circuit breaker determines whether a\n\t\t\tservice deployment will fail if the service can't reach a steady state. If it is turned\n\t\t\ton, a service deployment will transition to a failed state and stop launching new tasks.\n\t\t\tYou can also configure Amazon ECS to roll back your service to the last completed deployment\n\t\t\tafter a failure. For more information, see Rolling\n\t\t\t\tupdate in the Amazon Elastic Container Service Developer Guide.

\n

For more information about API failure reasons, see API failure\n\t\t\t\treasons in the Amazon Elastic Container Service Developer Guide.

" } }, "com.amazonaws.ecs#DeploymentConfiguration": { @@ -4377,19 +4377,19 @@ "deploymentCircuitBreaker": { "target": "com.amazonaws.ecs#DeploymentCircuitBreaker", "traits": { - "smithy.api#documentation": "\n

The deployment circuit breaker can only be used for services using the rolling update\n\t\t\t\t\t(ECS) deployment type.

\n
\n

The deployment circuit breaker determines whether a service\n\t\t\tdeployment will fail if the service can't reach a steady state. If you use the deployment circuit\n\t\t\tbreaker, a service deployment will transition to a failed state and stop launching new tasks. If you\n\t\t\tuse the rollback option, when a service deployment fails, the service is rolled back to the last\n\t\t\tdeployment that completed successfully. For more information, see Rolling update in the\n\t\t\t\tAmazon Elastic Container Service Developer Guide\n

" + "smithy.api#documentation": "\n

The deployment circuit breaker can only be used for services using the rolling\n\t\t\t\tupdate (ECS) deployment type.

\n
\n

The deployment circuit breaker determines whether a\n\t\t\tservice deployment will fail if the service can't reach a steady state. If you use the\n\t\t\tdeployment circuit breaker, a service deployment will transition to a failed state and\n\t\t\tstop launching new tasks. If you use the rollback option, when a service deployment\n\t\t\tfails, the service is rolled back to the last deployment that completed successfully.\n\t\t\tFor more information, see Rolling\n\t\t\t\tupdate in the Amazon Elastic Container Service Developer\n\t\t\t\tGuide\n

" } }, "maximumPercent": { "target": "com.amazonaws.ecs#BoxedInteger", "traits": { - "smithy.api#documentation": "

If a service is using the rolling update (ECS) deployment type, the\n\t\t\t\tmaximumPercent parameter represents an upper limit on the number of your service's\n\t\t\ttasks that are allowed in the RUNNING or PENDING state during a deployment,\n\t\t\tas a percentage of the desiredCount (rounded down to the nearest integer). This parameter\n\t\t\tenables you to define the deployment batch size. For example, if your service is using the\n\t\t\t\tREPLICA service scheduler and has a desiredCount of four tasks and a\n\t\t\t\tmaximumPercent value of 200%, the scheduler may start four new tasks before stopping\n\t\t\tthe four older tasks (provided that the cluster resources required to do this are available). The\n\t\t\tdefault maximumPercent value for a service using the REPLICA service\n\t\t\tscheduler is 200%.

\n

If a service is using either the blue/green (CODE_DEPLOY) or EXTERNAL\n\t\t\tdeployment types, and tasks in the service use the EC2 launch type, the maximum percent value is set to the default value. The maximum percent value is used to define the upper limit on the number of the tasks in\n\t\t\tthe service that remain in the RUNNING state while the container instances are in the\n\t\t\t\tDRAINING state.

\n \n

You can't specify a custom maximumPercent value for a service that uses either the\n\t\t\t\tblue/green (CODE_DEPLOY) or EXTERNAL deployment types and has tasks that\n\t\t\t\tuse the EC2 launch type.

\n
\n

If the tasks in the service use the Fargate launch type, the maximum percent value is\n\t\t\tnot used, although it is returned when describing your service.

" + "smithy.api#documentation": "

If a service is using the rolling update (ECS) deployment type, the\n\t\t\t\tmaximumPercent parameter represents an upper limit on the number of\n\t\t\tyour service's tasks that are allowed in the RUNNING or\n\t\t\t\tPENDING state during a deployment, as a percentage of the\n\t\t\t\tdesiredCount (rounded down to the nearest integer). This parameter\n\t\t\tenables you to define the deployment batch size. For example, if your service is using\n\t\t\tthe REPLICA service scheduler and has a desiredCount of four\n\t\t\ttasks and a maximumPercent value of 200%, the scheduler may start four new\n\t\t\ttasks before stopping the four older tasks (provided that the cluster resources required\n\t\t\tto do this are available). The default maximumPercent value for a service\n\t\t\tusing the REPLICA service scheduler is 200%.

\n

The Amazon ECS scheduler uses this parameter to replace unhealthy tasks by starting\n\t\t\treplacement tasks first and then stopping the unhealthy tasks, as long as cluster\n\t\t\tresources for starting replacement tasks are available. For more information about how\n\t\t\tthe scheduler replaces unhealthy tasks, see Amazon ECS\n\t\t\tservices.

\n

If a service is using either the blue/green (CODE_DEPLOY) or\n\t\t\t\tEXTERNAL deployment types, and tasks in the service use the\n\t\t\tEC2 launch type, the maximum percent\n\t\t\tvalue is set to the default value. The maximum percent\n\t\t\tvalue is used to define the upper limit on the number of the tasks in the service that\n\t\t\tremain in the RUNNING state while the container instances are in the\n\t\t\t\tDRAINING state.

\n \n

You can't specify a custom maximumPercent value for a service that\n\t\t\t\tuses either the blue/green (CODE_DEPLOY) or EXTERNAL\n\t\t\t\tdeployment types and has tasks that use the EC2 launch type.

\n
\n

If the service uses either the blue/green (CODE_DEPLOY) or EXTERNAL\n\t\t\tdeployment types, and the tasks in the service use the Fargate launch type, the maximum\n\t\t\tpercent value is not used. The value is still returned when describing your service.

" } }, "minimumHealthyPercent": { "target": "com.amazonaws.ecs#BoxedInteger", "traits": { - "smithy.api#documentation": "

If a service is using the rolling update (ECS) deployment type, the\n\t\t\t\tminimumHealthyPercent represents a lower limit on the number of your service's tasks\n\t\t\tthat must remain in the RUNNING state during a deployment, as a percentage of the\n\t\t\t\tdesiredCount (rounded up to the nearest integer). This parameter enables you to deploy\n\t\t\twithout using additional cluster capacity. For example, if your service has a desiredCount\n\t\t\tof four tasks and a minimumHealthyPercent of 50%, the service scheduler may stop two\n\t\t\texisting tasks to free up cluster capacity before starting two new tasks.

\n

For services that do not use a load balancer, the following should be\n\t\t\tnoted:

\n
    \n
  • \n

    A service is considered healthy if all essential containers within the tasks in the service\n\t\t\t\t\tpass their health checks.

    \n
  • \n
  • \n

    If a task has no essential containers with a health check defined, the service scheduler will\n\t\t\t\t\twait for 40 seconds after a task reaches a RUNNING state before the task is\n\t\t\t\t\tcounted towards the minimum healthy percent total.

    \n
  • \n
  • \n

    If a task has one or more essential containers with a health check defined, the service\n\t\t\t\t\tscheduler will wait for the task to reach a healthy status before counting it towards the\n\t\t\t\t\tminimum healthy percent total. A task is considered healthy when all essential containers\n\t\t\t\t\twithin the task have passed their health checks. The amount of time the service scheduler can\n\t\t\t\t\twait for is determined by the container health check settings.

    \n
  • \n
\n

For services that do use a load balancer, the following should be noted:

\n
    \n
  • \n

    If a task has no essential containers with a health check defined, the service scheduler will\n\t\t\t\t\twait for the load balancer target group health check to return a healthy status before counting\n\t\t\t\t\tthe task towards the minimum healthy percent total.

    \n
  • \n
  • \n

    If a task has an essential container with a health check defined, the service scheduler will\n\t\t\t\t\twait for both the task to reach a healthy status and the load balancer target group health\n\t\t\t\t\tcheck to return a healthy status before counting the task towards the minimum healthy percent\n\t\t\t\t\ttotal.

    \n
  • \n
\n

The default value for a replica service for minimumHealthyPercent is 100%. The default\n\t\t\t\tminimumHealthyPercent value for a service using the DAEMON service\n\t\t\tschedule is 0% for the CLI, the Amazon Web Services SDKs, and the APIs and 50% for the Amazon Web Services Management Console.

\n

The minimum number of healthy tasks during a deployment is the desiredCount multiplied\n\t\t\tby the minimumHealthyPercent/100, rounded up to the nearest integer value.

\n

If a service is using either the blue/green (CODE_DEPLOY) or EXTERNAL\n\t\t\tdeployment types and is running tasks that use the EC2 launch type, the minimum healthy percent value is set to the default value. The minimum healthy percent value is used to define the lower limit on the\n\t\t\tnumber of the tasks in the service that remain in the RUNNING state while the container\n\t\t\tinstances are in the DRAINING state.

\n \n

You can't specify a custom minimumHealthyPercent value for a service that uses\n\t\t\t\teither the blue/green (CODE_DEPLOY) or EXTERNAL deployment types and has\n\t\t\t\ttasks that use the EC2 launch type.

\n
\n

If a service is using either the blue/green (CODE_DEPLOY) or EXTERNAL\n\t\t\tdeployment types and is running tasks that use the Fargate launch type, the minimum\n\t\t\thealthy percent value is not used, although it is returned when describing your service.

" + "smithy.api#documentation": "

If a service is using the rolling update (ECS) deployment type, the\n\t\t\t\tminimumHealthyPercent represents a lower limit on the number of your\n\t\t\tservice's tasks that must remain in the RUNNING state during a deployment,\n\t\t\tas a percentage of the desiredCount (rounded up to the nearest integer).\n\t\t\tThis parameter enables you to deploy without using additional cluster capacity. For\n\t\t\texample, if your service has a desiredCount of four tasks and a\n\t\t\t\tminimumHealthyPercent of 50%, the service scheduler may stop two\n\t\t\texisting tasks to free up cluster capacity before starting two new tasks.

\n

If any tasks are unhealthy and if maximumPercent doesn't allow the Amazon ECS\n\t\t\tscheduler to start replacement tasks, the scheduler stops the unhealthy tasks one-by-one\n\t\t\t— using the minimumHealthyPercent as a constraint — to clear up capacity to\n\t\t\tlaunch replacement tasks. For more information about how the scheduler replaces\n\t\t\tunhealthy tasks, see Amazon ECS services .

\n

For services that do not use a load balancer, the following\n\t\t\tshould be noted:

\n
    \n
  • \n

    A service is considered healthy if all essential containers within the tasks\n\t\t\t\t\tin the service pass their health checks.

    \n
  • \n
  • \n

    If a task has no essential containers with a health check defined, the service\n\t\t\t\t\tscheduler will wait for 40 seconds after a task reaches a RUNNING\n\t\t\t\t\tstate before the task is counted towards the minimum healthy percent\n\t\t\t\t\ttotal.

    \n
  • \n
  • \n

    If a task has one or more essential containers with a health check defined,\n\t\t\t\t\tthe service scheduler will wait for the task to reach a healthy status before\n\t\t\t\t\tcounting it towards the minimum healthy percent total. A task is considered\n\t\t\t\t\thealthy when all essential containers within the task have passed their health\n\t\t\t\t\tchecks. The amount of time the service scheduler can wait for is determined by\n\t\t\t\t\tthe container health check settings.

    \n
  • \n
\n

For services that do use a load balancer, the following should be\n\t\t\tnoted:

\n
    \n
  • \n

    If a task has no essential containers with a health check defined, the service\n\t\t\t\t\tscheduler will wait for the load balancer target group health check to return a\n\t\t\t\t\thealthy status before counting the task towards the minimum healthy percent\n\t\t\t\t\ttotal.

    \n
  • \n
  • \n

    If a task has an essential container with a health check defined, the service\n\t\t\t\t\tscheduler will wait for both the task to reach a healthy status and the load\n\t\t\t\t\tbalancer target group health check to return a healthy status before counting\n\t\t\t\t\tthe task towards the minimum healthy percent total.

    \n
  • \n
\n

The default value for a replica service for minimumHealthyPercent is\n\t\t\t100%. The default minimumHealthyPercent value for a service using the\n\t\t\t\tDAEMON service schedule is 0% for the CLI, the Amazon Web Services SDKs, and the\n\t\t\tAPIs and 50% for the Amazon Web Services Management Console.

\n

The minimum number of healthy tasks during a deployment is the\n\t\t\t\tdesiredCount multiplied by the minimumHealthyPercent/100,\n\t\t\trounded up to the nearest integer value.

\n

If a service is using either the blue/green (CODE_DEPLOY) or\n\t\t\t\tEXTERNAL deployment types and is running tasks that use the\n\t\t\tEC2 launch type, the minimum healthy\n\t\t\t\tpercent value is set to the default value. The minimum healthy percent value is used to define the lower limit on the\n\t\t\tnumber of the tasks in the service that remain in the RUNNING state while\n\t\t\tthe container instances are in the DRAINING state.

\n \n

You can't specify a custom minimumHealthyPercent value for a service\n\t\t\t\tthat uses either the blue/green (CODE_DEPLOY) or EXTERNAL\n\t\t\t\tdeployment types and has tasks that use the EC2 launch type.

\n
\n

If a service is using either the blue/green (CODE_DEPLOY) or\n\t\t\t\tEXTERNAL deployment types and is running tasks that use the\n\t\t\tFargate launch type, the minimum healthy percent value is not used,\n\t\t\talthough it is returned when describing your service.

" } }, "alarms": { @@ -4400,7 +4400,7 @@ } }, "traits": { - "smithy.api#documentation": "

Optional deployment parameters that control how many tasks run during a deployment and the ordering\n\t\t\tof stopping and starting tasks.

" + "smithy.api#documentation": "

Optional deployment parameters that control how many tasks run during a deployment and\n\t\t\tthe ordering of stopping and starting tasks.

" } }, "com.amazonaws.ecs#DeploymentController": { @@ -4409,7 +4409,7 @@ "type": { "target": "com.amazonaws.ecs#DeploymentControllerType", "traits": { - "smithy.api#documentation": "

The deployment controller type to use.

\n

There are three deployment controller types available:

\n
\n
ECS
\n
\n

The rolling update (ECS) deployment type involves replacing the current\n\t\t\t\t\t\trunning version of the container with the latest version. The number of containers Amazon ECS\n\t\t\t\t\t\tadds or removes from the service during a rolling update is controlled by adjusting the\n\t\t\t\t\t\tminimum and maximum number of healthy tasks allowed during a service deployment, as\n\t\t\t\t\t\tspecified in the DeploymentConfiguration.

\n

For more information about rolling deployments, see Deploy Amazon ECS services by replacing tasks in the Amazon Elastic Container Service Developer Guide.

\n
\n
CODE_DEPLOY
\n
\n

The blue/green (CODE_DEPLOY) deployment type uses the blue/green deployment\n\t\t\t\t\t\tmodel powered by CodeDeploy, which allows you to verify a new deployment of a service before\n\t\t\t\t\t\tsending production traffic to it.

\n

For more information about blue/green deployments, see Validate the state of an Amazon ECS service before deployment in the Amazon Elastic Container Service Developer Guide.

\n
\n
EXTERNAL
\n
\n

The external (EXTERNAL) deployment type enables you to use any third-party\n\t\t\t\t\t\tdeployment controller for full control over the deployment process for an Amazon ECS\n\t\t\t\t\t\tservice.

\n

For more information about external deployments, see Deploy Amazon ECS services using a third-party controller in the Amazon Elastic Container Service Developer Guide.

\n
\n
", + "smithy.api#documentation": "

The deployment controller type to use.

\n

There are three deployment controller types available:

\n
\n
ECS
\n
\n

The rolling update (ECS) deployment type involves replacing\n\t\t\t\t\t\tthe current running version of the container with the latest version. The\n\t\t\t\t\t\tnumber of containers Amazon ECS adds or removes from the service during a rolling\n\t\t\t\t\t\tupdate is controlled by adjusting the minimum and maximum number of healthy\n\t\t\t\t\t\ttasks allowed during a service deployment, as specified in the DeploymentConfiguration.

\n

For more information about rolling deployments, see Deploy\n\t\t\t\t\t\t\tAmazon ECS services by replacing tasks in the Amazon Elastic Container Service Developer Guide.

\n
\n
CODE_DEPLOY
\n
\n

The blue/green (CODE_DEPLOY) deployment type uses the\n\t\t\t\t\t\tblue/green deployment model powered by CodeDeploy, which allows you to verify a\n\t\t\t\t\t\tnew deployment of a service before sending production traffic to it.

\n

For more information about blue/green deployments, see Validate the state of an Amazon ECS service before deployment in\n\t\t\t\t\t\tthe Amazon Elastic Container Service Developer Guide.

\n
\n
EXTERNAL
\n
\n

The external (EXTERNAL) deployment type enables you to use\n\t\t\t\t\t\tany third-party deployment controller for full control over the deployment\n\t\t\t\t\t\tprocess for an Amazon ECS service.

\n

For more information about external deployments, see Deploy Amazon ECS services using a third-party controller in the\n\t\t\t\t\t\tAmazon Elastic Container Service Developer Guide.

\n
\n
", "smithy.api#required": {} } } @@ -4447,7 +4447,7 @@ "kmsKeyId": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

Specify an Key Management Service key ID to encrypt the ephemeral storage for deployment.

" + "smithy.api#documentation": "

Specify an Key Management Service key ID to encrypt the ephemeral storage for\n\t\t\tdeployment.

" } } }, @@ -4507,7 +4507,7 @@ } ], "traits": { - "smithy.api#documentation": "

Deregisters an Amazon ECS container instance from the specified cluster. This instance is no longer\n\t\t\tavailable to run tasks.

\n

If you intend to use the container instance for some other purpose after deregistration, we recommend\n\t\t\tthat you stop all of the tasks running on the container instance before deregistration. That prevents\n\t\t\tany orphaned tasks from consuming resources.

\n

Deregistering a container instance removes the instance from a cluster, but it doesn't terminate the\n\t\t\tEC2 instance. If you are finished using the instance, be sure to terminate it in the Amazon EC2 console to\n\t\t\tstop billing.

\n \n

If you terminate a running container instance, Amazon ECS automatically deregisters the instance from\n\t\t\t\tyour cluster (stopped container instances or instances with disconnected agents aren't\n\t\t\t\tautomatically deregistered when terminated).

\n
", + "smithy.api#documentation": "

Deregisters an Amazon ECS container instance from the specified cluster. This instance is\n\t\t\tno longer available to run tasks.

\n

If you intend to use the container instance for some other purpose after\n\t\t\tderegistration, we recommend that you stop all of the tasks running on the container\n\t\t\tinstance before deregistration. That prevents any orphaned tasks from consuming\n\t\t\tresources.

\n

Deregistering a container instance removes the instance from a cluster, but it doesn't\n\t\t\tterminate the EC2 instance. If you are finished using the instance, be sure to terminate\n\t\t\tit in the Amazon EC2 console to stop billing.

\n \n

If you terminate a running container instance, Amazon ECS automatically deregisters the\n\t\t\t\tinstance from your cluster (stopped container instances or instances with\n\t\t\t\tdisconnected agents aren't automatically deregistered when terminated).

\n
", "smithy.api#examples": [ { "title": "To deregister a container instance from a cluster", @@ -4528,20 +4528,20 @@ "cluster": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

The short name or full Amazon Resource Name (ARN) of the cluster that hosts the container instance to deregister.\n\t\t\tIf you do not specify a cluster, the default cluster is assumed.

" + "smithy.api#documentation": "

The short name or full Amazon Resource Name (ARN) of the cluster that hosts the container instance to\n\t\t\tderegister. If you do not specify a cluster, the default cluster is assumed.

" } }, "containerInstance": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

The container instance ID or full ARN of the container instance to deregister. For more information\n\t\t\tabout the ARN format, see Amazon Resource Name (ARN)\n\t\t\tin the Amazon ECS Developer Guide.

", + "smithy.api#documentation": "

The container instance ID or full ARN of the container instance to deregister. For\n\t\t\tmore information about the ARN format, see Amazon Resource Name (ARN) in the Amazon ECS Developer Guide.

", "smithy.api#required": {} } }, "force": { "target": "com.amazonaws.ecs#BoxedBoolean", "traits": { - "smithy.api#documentation": "

Forces the container instance to be deregistered. If you have tasks running on the container instance\n\t\t\twhen you deregister it with the force option, these tasks remain running until you\n\t\t\tterminate the instance or the tasks stop through some other means, but they're orphaned (no longer\n\t\t\tmonitored or accounted for by Amazon ECS). If an orphaned task on your container instance is part of an\n\t\t\tAmazon ECS service, then the service scheduler starts another copy of that task, on a different container\n\t\t\tinstance if possible.

\n

Any containers in orphaned service tasks that are registered with a Classic Load Balancer or an Application Load Balancer target group\n\t\t\tare deregistered. They begin connection draining according to the settings on the load balancer or\n\t\t\ttarget group.

" + "smithy.api#documentation": "

Forces the container instance to be deregistered. If you have tasks running on the\n\t\t\tcontainer instance when you deregister it with the force option, these\n\t\t\ttasks remain running until you terminate the instance or the tasks stop through some\n\t\t\tother means, but they're orphaned (no longer monitored or accounted for by Amazon ECS). If an\n\t\t\torphaned task on your container instance is part of an Amazon ECS service, then the service\n\t\t\tscheduler starts another copy of that task, on a different container instance if\n\t\t\tpossible.

\n

Any containers in orphaned service tasks that are registered with a Classic Load Balancer or an Application Load Balancer\n\t\t\ttarget group are deregistered. They begin connection draining according to the settings\n\t\t\ton the load balancer or target group.

" } } }, @@ -4583,7 +4583,7 @@ } ], "traits": { - "smithy.api#documentation": "

Deregisters the specified task definition by family and revision. Upon deregistration, the task\n\t\t\tdefinition is marked as INACTIVE. Existing tasks and services that reference an\n\t\t\t\tINACTIVE task definition continue to run without disruption. Existing services that\n\t\t\treference an INACTIVE task definition can still scale up or down by modifying the\n\t\t\tservice's desired count. If you want to delete a task definition revision, you must first deregister\n\t\t\tthe task definition revision.

\n

You can't use an INACTIVE task definition to run new tasks or create new services, and\n\t\t\tyou can't update an existing service to reference an INACTIVE task definition. However,\n\t\t\tthere may be up to a 10-minute window following deregistration where these restrictions have not yet\n\t\t\ttaken effect.

\n \n

At this time, INACTIVE task definitions remain discoverable in your account\n\t\t\t\tindefinitely. However, this behavior is subject to change in the future. We don't recommend that\n\t\t\t\tyou rely on INACTIVE task definitions persisting beyond the lifecycle of any\n\t\t\t\tassociated tasks and services.

\n
\n

You must deregister a task definition revision before you delete it. For more information, see DeleteTaskDefinitions.

" + "smithy.api#documentation": "

Deregisters the specified task definition by family and revision. Upon deregistration,\n\t\t\tthe task definition is marked as INACTIVE. Existing tasks and services that\n\t\t\treference an INACTIVE task definition continue to run without disruption.\n\t\t\tExisting services that reference an INACTIVE task definition can still\n\t\t\tscale up or down by modifying the service's desired count. If you want to delete a task\n\t\t\tdefinition revision, you must first deregister the task definition revision.

\n

You can't use an INACTIVE task definition to run new tasks or create new\n\t\t\tservices, and you can't update an existing service to reference an INACTIVE\n\t\t\ttask definition. However, there may be up to a 10-minute window following deregistration\n\t\t\twhere these restrictions have not yet taken effect.

\n \n

At this time, INACTIVE task definitions remain discoverable in your\n\t\t\t\taccount indefinitely. However, this behavior is subject to change in the future. We\n\t\t\t\tdon't recommend that you rely on INACTIVE task definitions persisting\n\t\t\t\tbeyond the lifecycle of any associated tasks and services.

\n
\n

You must deregister a task definition revision before you delete it. For more\n\t\t\tinformation, see DeleteTaskDefinitions.

" } }, "com.amazonaws.ecs#DeregisterTaskDefinitionRequest": { @@ -4592,7 +4592,7 @@ "taskDefinition": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

The family and revision (family:revision) or full Amazon Resource Name (ARN) of\n\t\t\tthe task definition to deregister. You must specify a revision.

", + "smithy.api#documentation": "

The family and revision (family:revision) or\n\t\t\tfull Amazon Resource Name (ARN) of the task definition to deregister. You must specify a\n\t\t\t\trevision.

", "smithy.api#required": {} } } @@ -4644,25 +4644,25 @@ "capacityProviders": { "target": "com.amazonaws.ecs#StringList", "traits": { - "smithy.api#documentation": "

The short name or full Amazon Resource Name (ARN) of one or more capacity providers. Up to 100 capacity\n\t\t\tproviders can be described in an action.

" + "smithy.api#documentation": "

The short name or full Amazon Resource Name (ARN) of one or more capacity providers. Up to\n\t\t\t\t100 capacity providers can be described in an action.

" } }, "include": { "target": "com.amazonaws.ecs#CapacityProviderFieldList", "traits": { - "smithy.api#documentation": "

Specifies whether or not you want to see the resource tags for the capacity provider. If\n\t\t\t\tTAGS is specified, the tags are included in the response. If this field is omitted,\n\t\t\ttags aren't included in the response.

" + "smithy.api#documentation": "

Specifies whether or not you want to see the resource tags for the capacity provider.\n\t\t\tIf TAGS is specified, the tags are included in the response. If this field\n\t\t\tis omitted, tags aren't included in the response.

" } }, "maxResults": { "target": "com.amazonaws.ecs#BoxedInteger", "traits": { - "smithy.api#documentation": "

The maximum number of account setting results returned by DescribeCapacityProviders in\n\t\t\tpaginated output. When this parameter is used, DescribeCapacityProviders only returns\n\t\t\t\tmaxResults results in a single page along with a nextToken response\n\t\t\telement. The remaining results of the initial request can be seen by sending another\n\t\t\t\tDescribeCapacityProviders request with the returned nextToken value. This\n\t\t\tvalue can be between 1 and 10. If\n\t\t\tthis parameter is not used, then DescribeCapacityProviders returns up to\n\t\t\t10 results and a nextToken value if\n\t\t\tapplicable.

" + "smithy.api#documentation": "

The maximum number of account setting results returned by\n\t\t\t\tDescribeCapacityProviders in paginated output. When this parameter is\n\t\t\tused, DescribeCapacityProviders only returns maxResults\n\t\t\tresults in a single page along with a nextToken response element. The\n\t\t\tremaining results of the initial request can be seen by sending another\n\t\t\t\tDescribeCapacityProviders request with the returned\n\t\t\t\tnextToken value. This value can be between\n\t\t\t1 and 10. If this\n\t\t\tparameter is not used, then DescribeCapacityProviders returns up to\n\t\t\t10 results and a nextToken value\n\t\t\tif applicable.

" } }, "nextToken": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

The nextToken value returned from a previous paginated\n\t\t\t\tDescribeCapacityProviders request where maxResults was used and the\n\t\t\tresults exceeded the value of that parameter. Pagination continues from the end of the previous results\n\t\t\tthat returned the nextToken value.

\n \n

This token should be treated as an opaque identifier that is only used to\n retrieve the next items in a list and not for other programmatic purposes.

\n
" + "smithy.api#documentation": "

The nextToken value returned from a previous paginated\n\t\t\t\tDescribeCapacityProviders request where maxResults was\n\t\t\tused and the results exceeded the value of that parameter. Pagination continues from the\n\t\t\tend of the previous results that returned the nextToken value.

\n \n

This token should be treated as an opaque identifier that is only used to\n retrieve the next items in a list and not for other programmatic purposes.

\n
" } } }, @@ -4688,7 +4688,7 @@ "nextToken": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

The nextToken value to include in a future DescribeCapacityProviders\n\t\t\trequest. When the results of a DescribeCapacityProviders request exceed\n\t\t\t\tmaxResults, this value can be used to retrieve the next page of results. This value is\n\t\t\t\tnull when there are no more results to return.

" + "smithy.api#documentation": "

The nextToken value to include in a future\n\t\t\t\tDescribeCapacityProviders request. When the results of a\n\t\t\t\tDescribeCapacityProviders request exceed maxResults, this\n\t\t\tvalue can be used to retrieve the next page of results. This value is null\n\t\t\twhen there are no more results to return.

" } } }, @@ -4746,13 +4746,13 @@ "clusters": { "target": "com.amazonaws.ecs#StringList", "traits": { - "smithy.api#documentation": "

A list of up to 100 cluster names or full cluster Amazon Resource Name (ARN) entries. If you do not specify a cluster, the default cluster is assumed.

" + "smithy.api#documentation": "

A list of up to 100 cluster names or full cluster Amazon Resource Name (ARN) entries.\n\t\t\tIf you do not specify a cluster, the default cluster is assumed.

" } }, "include": { "target": "com.amazonaws.ecs#ClusterFieldList", "traits": { - "smithy.api#documentation": "

Determines whether to include additional information about the clusters in the response. If this\n\t\t\tfield is omitted, this information isn't included.

\n

If ATTACHMENTS is specified, the attachments for the container instances or tasks within\n\t\t\tthe cluster are included, for example the capacity providers.

\n

If SETTINGS is specified, the settings for the cluster are included.

\n

If CONFIGURATIONS is specified, the configuration for the cluster is included.

\n

If STATISTICS is specified, the task and service count is included, separated by launch\n\t\t\ttype.

\n

If TAGS is specified, the metadata tags associated with the cluster are included.

" + "smithy.api#documentation": "

Determines whether to include additional information about the clusters in the\n\t\t\tresponse. If this field is omitted, this information isn't included.

\n

If ATTACHMENTS is specified, the attachments for the container instances\n\t\t\tor tasks within the cluster are included, for example the capacity providers.

\n

If SETTINGS is specified, the settings for the cluster are\n\t\t\tincluded.

\n

If CONFIGURATIONS is specified, the configuration for the cluster is\n\t\t\tincluded.

\n

If STATISTICS is specified, the task and service count is included,\n\t\t\tseparated by launch type.

\n

If TAGS is specified, the metadata tags associated with the cluster are\n\t\t\tincluded.

" } } }, @@ -4803,7 +4803,7 @@ } ], "traits": { - "smithy.api#documentation": "

Describes one or more container instances. Returns metadata about each container instance\n\t\t\trequested.

", + "smithy.api#documentation": "

Describes one or more container instances. Returns metadata about each container\n\t\t\tinstance requested.

", "smithy.api#examples": [ { "title": "To describe container instance", @@ -4896,7 +4896,7 @@ "cluster": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

The short name or full Amazon Resource Name (ARN) of the cluster that hosts the container instances to describe.\n\t\t\tIf you do not specify a cluster, the default cluster is assumed. This parameter is required if the container instance or container instances\n\t\t\tyou are describing were launched in any cluster other than the default cluster.

" + "smithy.api#documentation": "

The short name or full Amazon Resource Name (ARN) of the cluster that hosts the container instances to\n\t\t\tdescribe. If you do not specify a cluster, the default cluster is assumed. This parameter is required if the container instance\n\t\t\tor container instances you are describing were launched in any cluster other than the\n\t\t\tdefault cluster.

" } }, "containerInstances": { @@ -4909,7 +4909,7 @@ "include": { "target": "com.amazonaws.ecs#ContainerInstanceFieldList", "traits": { - "smithy.api#documentation": "

Specifies whether you want to see the resource tags for the container instance. If TAGS\n\t\t\tis specified, the tags are included in the response. If CONTAINER_INSTANCE_HEALTH is\n\t\t\tspecified, the container instance health is included in the response. If this field is omitted, tags\n\t\t\tand container instance health status aren't included in the response.

" + "smithy.api#documentation": "

Specifies whether you want to see the resource tags for the container instance. If\n\t\t\t\tTAGS is specified, the tags are included in the response. If\n\t\t\t\tCONTAINER_INSTANCE_HEALTH is specified, the container instance health\n\t\t\tis included in the response. If this field is omitted, tags and container instance\n\t\t\thealth status aren't included in the response.

" } } }, @@ -4969,7 +4969,7 @@ } ], "traits": { - "smithy.api#documentation": "

Describes one or more of your service deployments.

\n

A service deployment happens when you release a software update for the service. For more information, see Amazon ECS service deployments.

" + "smithy.api#documentation": "

Describes one or more of your service deployments.

\n

A service deployment happens when you release a software update for the service. For\n\t\t\tmore information, see Amazon ECS service\n\t\t\t\tdeployments.

" } }, "com.amazonaws.ecs#DescribeServiceDeploymentsRequest": { @@ -4999,7 +4999,7 @@ "failures": { "target": "com.amazonaws.ecs#Failures", "traits": { - "smithy.api#documentation": "

Any failures associated with the call.

\n

If you decsribe a deployment with a service revision created before October 25, 2024, the\n\t\t\tcall fails. The failure includes the service revision ARN and the reason set to\n\t\t\tMISSING.

" + "smithy.api#documentation": "

Any failures associated with the call.

\n

If you describe a deployment with a service revision created before October 25, 2024,\n\t\t\tthe call fails. The failure includes the service revision ARN and the reason set to\n\t\t\t\tMISSING.

" } } }, @@ -5039,7 +5039,7 @@ } ], "traits": { - "smithy.api#documentation": "

Describes one or more service revisions.

\n

A service revision is a version of the service that includes the values for the Amazon ECS\n\t\t\tresources (for example, task definition) and the environment resources (for example,\n\t\t\tload balancers, subnets, and security groups). For more information, see Amazon ECS service revisions.

\n

You can't describe a service revision that was created before October 25, 2024.

" + "smithy.api#documentation": "

Describes one or more service revisions.

\n

A service revision is a version of the service that includes the values for the Amazon\n\t\t\tECS resources (for example, task definition) and the environment resources (for example,\n\t\t\tload balancers, subnets, and security groups). For more information, see Amazon ECS service revisions.

\n

You can't describe a service revision that was created before October 25, 2024.

" } }, "com.amazonaws.ecs#DescribeServiceRevisionsRequest": { @@ -5048,7 +5048,7 @@ "serviceRevisionArns": { "target": "com.amazonaws.ecs#StringList", "traits": { - "smithy.api#documentation": "

The ARN of the service revision.

\n

You can specify a maximum of 20 ARNs.

\n

You can call ListServiceDeployments to\n\t\t\tget the ARNs.

", + "smithy.api#documentation": "

The ARN of the service revision.

\n

You can specify a maximum of 20 ARNs.

\n

You can call ListServiceDeployments to get the ARNs.

", "smithy.api#required": {} } } @@ -5232,20 +5232,20 @@ "cluster": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

The short name or full Amazon Resource Name (ARN)the cluster that hosts the service to describe.\n\t\t\tIf you do not specify a cluster, the default cluster is assumed. This parameter is required if the service or services you are describing were\n\t\t\tlaunched in any cluster other than the default cluster.

" + "smithy.api#documentation": "

The short name or full Amazon Resource Name (ARN) of the cluster that hosts the service to describe.\n\t\t\tIf you do not specify a cluster, the default cluster is assumed. This parameter is required if the service or services you are\n\t\t\tdescribing were launched in any cluster other than the default cluster.

" } }, "services": { "target": "com.amazonaws.ecs#StringList", "traits": { - "smithy.api#documentation": "

A list of services to describe. You may specify up to 10 services to describe in a single\n\t\t\toperation.

", + "smithy.api#documentation": "

A list of services to describe. You may specify up to 10 services to describe in a\n\t\t\tsingle operation.

", "smithy.api#required": {} } }, "include": { "target": "com.amazonaws.ecs#ServiceFieldList", "traits": { - "smithy.api#documentation": "

Determines whether you want to see the resource tags for the service. If TAGS is\n\t\t\tspecified, the tags are included in the response. If this field is omitted, tags aren't included in the\n\t\t\tresponse.

" + "smithy.api#documentation": "

Determines whether you want to see the resource tags for the service. If\n\t\t\t\tTAGS is specified, the tags are included in the response. If this field\n\t\t\tis omitted, tags aren't included in the response.

" } } }, @@ -5293,7 +5293,7 @@ } ], "traits": { - "smithy.api#documentation": "

Describes a task definition. You can specify a family and revision to find\n\t\t\tinformation about a specific task definition, or you can simply specify the family to find the latest\n\t\t\t\tACTIVE revision in that family.

\n \n

You can only describe INACTIVE task definitions while an active task or service\n\t\t\t\treferences them.

\n
", + "smithy.api#documentation": "

Describes a task definition. You can specify a family and\n\t\t\t\trevision to find information about a specific task definition, or you\n\t\t\tcan simply specify the family to find the latest ACTIVE revision in that\n\t\t\tfamily.

\n \n

You can only describe INACTIVE task definitions while an active task\n\t\t\t\tor service references them.

\n
", "smithy.api#examples": [ { "title": "To describe a task definition", @@ -5356,14 +5356,14 @@ "taskDefinition": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

The family for the latest ACTIVE revision, family and\n\t\t\t\trevision (family:revision) for a specific revision in the family, or full\n\t\t\tAmazon Resource Name (ARN) of the task definition to describe.

", + "smithy.api#documentation": "

The family for the latest ACTIVE revision,\n\t\t\t\tfamily and revision (family:revision) for a\n\t\t\tspecific revision in the family, or full Amazon Resource Name (ARN) of the task definition to\n\t\t\tdescribe.

", "smithy.api#required": {} } }, "include": { "target": "com.amazonaws.ecs#TaskDefinitionFieldList", "traits": { - "smithy.api#documentation": "

Determines whether to see the resource tags for the task definition. If TAGS is\n\t\t\tspecified, the tags are included in the response. If this field is omitted, tags aren't included in the\n\t\t\tresponse.

" + "smithy.api#documentation": "

Determines whether to see the resource tags for the task definition. If\n\t\t\t\tTAGS is specified, the tags are included in the response. If this field\n\t\t\tis omitted, tags aren't included in the response.

" } } }, @@ -5383,7 +5383,7 @@ "tags": { "target": "com.amazonaws.ecs#Tags", "traits": { - "smithy.api#documentation": "

The metadata that's applied to the task definition to help you categorize and organize them. Each tag\n\t\t\tconsists of a key and an optional value. You define both.

\n

The following basic restrictions apply to tags:

\n
    \n
  • \n

    Maximum number of tags per resource - 50

    \n
  • \n
  • \n

    For each resource, each tag key must be unique, and each tag key can have only\n one value.

    \n
  • \n
  • \n

    Maximum key length - 128 Unicode characters in UTF-8

    \n
  • \n
  • \n

    Maximum value length - 256 Unicode characters in UTF-8

    \n
  • \n
  • \n

    If your tagging schema is used across multiple services and resources,\n remember that other services may have restrictions on allowed characters.\n Generally allowed characters are: letters, numbers, and spaces representable in\n UTF-8, and the following characters: + - = . _ : / @.

    \n
  • \n
  • \n

    Tag keys and values are case-sensitive.

    \n
  • \n
  • \n

    Do not use aws:, AWS:, or any upper or lowercase\n combination of such as a prefix for either keys or values as it is reserved for\n Amazon Web Services use. You cannot edit or delete tag keys or values with this prefix. Tags with\n this prefix do not count against your tags per resource limit.

    \n
  • \n
" + "smithy.api#documentation": "

The metadata that's applied to the task definition to help you categorize and organize\n\t\t\tthem. Each tag consists of a key and an optional value. You define both.

\n

The following basic restrictions apply to tags:

\n
    \n
  • \n

    Maximum number of tags per resource - 50

    \n
  • \n
  • \n

    For each resource, each tag key must be unique, and each tag key can have only\n one value.

    \n
  • \n
  • \n

    Maximum key length - 128 Unicode characters in UTF-8

    \n
  • \n
  • \n

    Maximum value length - 256 Unicode characters in UTF-8

    \n
  • \n
  • \n

    If your tagging schema is used across multiple services and resources,\n remember that other services may have restrictions on allowed characters.\n Generally allowed characters are: letters, numbers, and spaces representable in\n UTF-8, and the following characters: + - = . _ : / @.

    \n
  • \n
  • \n

    Tag keys and values are case-sensitive.

    \n
  • \n
  • \n

    Do not use aws:, AWS:, or any upper or lowercase\n combination of such as a prefix for either keys or values as it is reserved for\n Amazon Web Services use. You cannot edit or delete tag keys or values with this prefix. Tags with\n this prefix do not count against your tags per resource limit.

    \n
  • \n
" } } }, @@ -5426,7 +5426,7 @@ } ], "traits": { - "smithy.api#documentation": "

Describes the task sets in the specified cluster and service. This is used when a service uses the\n\t\t\t\tEXTERNAL deployment controller type. For more information, see Amazon ECS Deployment\n\t\t\t\tTypes in the Amazon Elastic Container Service Developer Guide.

" + "smithy.api#documentation": "

Describes the task sets in the specified cluster and service. This is used when a\n\t\t\tservice uses the EXTERNAL deployment controller type. For more information,\n\t\t\tsee Amazon ECS Deployment\n\t\t\t\tTypes in the Amazon Elastic Container Service Developer Guide.

" } }, "com.amazonaws.ecs#DescribeTaskSetsRequest": { @@ -5435,7 +5435,7 @@ "cluster": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

The short name or full Amazon Resource Name (ARN) of the cluster that hosts the service that the task sets exist\n\t\t\tin.

", + "smithy.api#documentation": "

The short name or full Amazon Resource Name (ARN) of the cluster that hosts the service that the task\n\t\t\tsets exist in.

", "smithy.api#required": {} } }, @@ -5449,13 +5449,13 @@ "taskSets": { "target": "com.amazonaws.ecs#StringList", "traits": { - "smithy.api#documentation": "

The ID or full Amazon Resource Name (ARN) of task sets to describe.

" + "smithy.api#documentation": "

The ID or full Amazon Resource Name (ARN) of task sets to\n\t\t\tdescribe.

" } }, "include": { "target": "com.amazonaws.ecs#TaskSetFieldList", "traits": { - "smithy.api#documentation": "

Specifies whether to see the resource tags for the task set. If TAGS is specified, the\n\t\t\ttags are included in the response. If this field is omitted, tags aren't included in the\n\t\t\tresponse.

" + "smithy.api#documentation": "

Specifies whether to see the resource tags for the task set. If TAGS is\n\t\t\tspecified, the tags are included in the response. If this field is omitted, tags aren't\n\t\t\tincluded in the response.

" } } }, @@ -5506,7 +5506,7 @@ } ], "traits": { - "smithy.api#documentation": "

Describes a specified task or tasks.

\n

Currently, stopped tasks appear in the returned results for at least one hour.

\n

If you have tasks with tags, and then delete the cluster, the tagged tasks are returned in the\n\t\t\tresponse. If you create a new cluster with the same name as the deleted cluster, the tagged tasks are\n\t\t\tnot included in the response.

", + "smithy.api#documentation": "

Describes a specified task or tasks.

\n

Currently, stopped tasks appear in the returned results for at least one hour.

\n

If you have tasks with tags, and then delete the cluster, the tagged tasks are\n\t\t\treturned in the response. If you create a new cluster with the same name as the deleted\n\t\t\tcluster, the tagged tasks are not included in the response.

", "smithy.api#examples": [ { "title": "To describe a task", @@ -5614,7 +5614,7 @@ "cluster": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

The short name or full Amazon Resource Name (ARN) of the cluster that hosts the task or tasks to describe.\n\t\t\tIf you do not specify a cluster, the default cluster is assumed. This parameter is required. If you do not specify a value, the\n\t\t\t\tdefault cluster is used.

" + "smithy.api#documentation": "

The short name or full Amazon Resource Name (ARN) of the cluster that hosts the task or tasks to\n\t\t\tdescribe. If you do not specify a cluster, the default cluster is assumed. This parameter is required. If you do not specify a\n\t\t\tvalue, the default cluster is used.

" } }, "tasks": { @@ -5627,7 +5627,7 @@ "include": { "target": "com.amazonaws.ecs#TaskFieldList", "traits": { - "smithy.api#documentation": "

Specifies whether you want to see the resource tags for the task. If TAGS is specified,\n\t\t\tthe tags are included in the response. If this field is omitted, tags aren't included in the\n\t\t\tresponse.

" + "smithy.api#documentation": "

Specifies whether you want to see the resource tags for the task. If TAGS\n\t\t\tis specified, the tags are included in the response. If this field is omitted, tags\n\t\t\taren't included in the response.

" } } }, @@ -5697,7 +5697,7 @@ "permissions": { "target": "com.amazonaws.ecs#DeviceCgroupPermissions", "traits": { - "smithy.api#documentation": "

The explicit permissions to provide to the container for the device. By default, the container has\n\t\t\tpermissions for read, write, and mknod for the device.

" + "smithy.api#documentation": "

The explicit permissions to provide to the container for the device. By default, the\n\t\t\tcontainer has permissions for read, write, and\n\t\t\t\tmknod for the device.

" } } }, @@ -5766,13 +5766,13 @@ "containerInstance": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

The container instance ID or full ARN of the container instance. For more information about the\n\t\t\tARN format, see Amazon Resource Name (ARN)\n\t\t\tin the Amazon ECS Developer Guide.

" + "smithy.api#documentation": "

The container instance ID or full ARN of the container instance. For more\n\t\t\tinformation about the ARN format, see Amazon Resource Name (ARN) in the Amazon ECS Developer Guide.

" } }, "cluster": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

The short name or full Amazon Resource Name (ARN) of the cluster that the container instance belongs to.

" + "smithy.api#documentation": "

The short name or full Amazon Resource Name (ARN) of the cluster that the container instance belongs\n\t\t\tto.

" } } }, @@ -5821,36 +5821,36 @@ "scope": { "target": "com.amazonaws.ecs#Scope", "traits": { - "smithy.api#documentation": "

The scope for the Docker volume that determines its lifecycle. Docker volumes that are scoped to a\n\t\t\t\ttask are automatically provisioned when the task starts and destroyed when the task\n\t\t\tstops. Docker volumes that are scoped as shared persist after the task stops.

" + "smithy.api#documentation": "

The scope for the Docker volume that determines its lifecycle. Docker volumes that are\n\t\t\tscoped to a task are automatically provisioned when the task starts and\n\t\t\tdestroyed when the task stops. Docker volumes that are scoped as shared\n\t\t\tpersist after the task stops.

" } }, "autoprovision": { "target": "com.amazonaws.ecs#BoxedBoolean", "traits": { - "smithy.api#documentation": "

If this value is true, the Docker volume is created if it doesn't already exist.

\n \n

This field is only used if the scope is shared.

\n
" + "smithy.api#documentation": "

If this value is true, the Docker volume is created if it doesn't already\n\t\t\texist.

\n \n

This field is only used if the scope is shared.

\n
" } }, "driver": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

The Docker volume driver to use. The driver value must match the driver name provided by Docker\n\t\t\tbecause it is used for task placement. If the driver was installed using the Docker plugin CLI, use\n\t\t\t\tdocker plugin ls to retrieve the driver name from your container instance. If the\n\t\t\tdriver was installed using another method, use Docker plugin discovery to retrieve the driver name.\n\t\t\tThis parameter maps to Driver in the docker container create command and the\n\t\t\t\txxdriver option to docker volume create.

" + "smithy.api#documentation": "

The Docker volume driver to use. The driver value must match the driver name provided\n\t\t\tby Docker because it is used for task placement. If the driver was installed using the\n\t\t\tDocker plugin CLI, use docker plugin ls to retrieve the driver name from\n\t\t\tyour container instance. If the driver was installed using another method, use Docker\n\t\t\tplugin discovery to retrieve the driver name. This parameter maps to Driver\n\t\t\tin the docker container create command and the xxdriver option to docker\n\t\t\tvolume create.

" } }, "driverOpts": { "target": "com.amazonaws.ecs#StringMap", "traits": { - "smithy.api#documentation": "

A map of Docker driver-specific options passed through. This parameter maps to\n\t\t\t\tDriverOpts in the docker create-volume command and the xxopt option to\n\t\t\tdocker volume create.

" + "smithy.api#documentation": "

A map of Docker driver-specific options passed through. This parameter maps to\n\t\t\t\tDriverOpts in the docker create-volume command and the\n\t\t\t\txxopt option to docker volume create.

" } }, "labels": { "target": "com.amazonaws.ecs#StringMap", "traits": { - "smithy.api#documentation": "

Custom metadata to add to your Docker volume. This parameter maps to Labels in the\n\t\t\tdocker container create command and the xxlabel option to docker volume create.

" + "smithy.api#documentation": "

Custom metadata to add to your Docker volume. This parameter maps to\n\t\t\t\tLabels in the docker container create command and the\n\t\t\t\txxlabel option to docker volume create.

" } } }, "traits": { - "smithy.api#documentation": "

This parameter is specified when you're using Docker volumes. Docker volumes are only supported when\n\t\t\tyou're using the EC2 launch type. Windows containers only support the use of the\n\t\t\t\tlocal driver. To use bind mounts, specify a host instead.

" + "smithy.api#documentation": "

This parameter is specified when you're using Docker volumes. Docker volumes are only\n\t\t\tsupported when you're using the EC2 launch type. Windows containers only\n\t\t\tsupport the use of the local driver. To use bind mounts, specify a\n\t\t\t\thost instead.

" } }, "com.amazonaws.ecs#Double": { @@ -5904,7 +5904,7 @@ "propagateTags": { "target": "com.amazonaws.ecs#PropagateTags", "traits": { - "smithy.api#documentation": "

Determines whether to propagate the tags from the task definition to \u2028the Amazon EBS volume. Tags can only\n\t\t\tpropagate to a SERVICE specified in \u2028ServiceVolumeConfiguration. If no value\n\t\t\tis specified, the tags aren't \u2028propagated.

" + "smithy.api#documentation": "

Determines whether to propagate the tags from the task definition to \u2028the Amazon EBS\n\t\t\tvolume. Tags can only propagate to a SERVICE specified in\n\t\t\t\t\u2028ServiceVolumeConfiguration. If no value is specified, the tags aren't\n\t\t\t\u2028propagated.

" } } }, @@ -5930,13 +5930,13 @@ "accessPointId": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

The Amazon EFS access point ID to use. If an access point is specified, the root directory value specified\n\t\t\tin the EFSVolumeConfiguration must either be omitted or set to / which will\n\t\t\tenforce the path set on the EFS access point. If an access point is used, transit encryption must be on\n\t\t\tin the EFSVolumeConfiguration. For more information, see Working with Amazon EFS access points in the\n\t\t\tAmazon Elastic File System User Guide.

" + "smithy.api#documentation": "

The Amazon EFS access point ID to use. If an access point is specified, the root directory\n\t\t\tvalue specified in the EFSVolumeConfiguration must either be omitted or set\n\t\t\tto / which will enforce the path set on the EFS access point. If an access\n\t\t\tpoint is used, transit encryption must be on in the EFSVolumeConfiguration.\n\t\t\tFor more information, see Working with Amazon EFS access\n\t\t\t\tpoints in the Amazon Elastic File System User Guide.

" } }, "iam": { "target": "com.amazonaws.ecs#EFSAuthorizationConfigIAM", "traits": { - "smithy.api#documentation": "

Determines whether to use the Amazon ECS task role defined in a task definition when mounting the Amazon EFS\n\t\t\tfile system. If it is turned on, transit encryption must be turned on in the\n\t\t\t\tEFSVolumeConfiguration. If this parameter is omitted, the default value of\n\t\t\t\tDISABLED is used. For more information, see Using Amazon EFS access\n\t\t\t\tpoints in the Amazon Elastic Container Service Developer Guide.

" + "smithy.api#documentation": "

Determines whether to use the Amazon ECS task role defined in a task definition when\n\t\t\tmounting the Amazon EFS file system. If it is turned on, transit encryption must be turned on\n\t\t\tin the EFSVolumeConfiguration. If this parameter is omitted, the default\n\t\t\tvalue of DISABLED is used. For more information, see Using\n\t\t\t\tAmazon EFS access points in the Amazon Elastic Container Service Developer Guide.

" } } }, @@ -5991,19 +5991,19 @@ "rootDirectory": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

The directory within the Amazon EFS file system to mount as the root directory inside the host. If this\n\t\t\tparameter is omitted, the root of the Amazon EFS volume will be used. Specifying / will have\n\t\t\tthe same effect as omitting this parameter.

\n \n

If an EFS access point is specified in the authorizationConfig, the root directory\n\t\t\t\tparameter must either be omitted or set to / which will enforce the path set on the\n\t\t\t\tEFS access point.

\n
" + "smithy.api#documentation": "

The directory within the Amazon EFS file system to mount as the root directory inside the\n\t\t\thost. If this parameter is omitted, the root of the Amazon EFS volume will be used.\n\t\t\tSpecifying / will have the same effect as omitting this parameter.

\n \n

If an EFS access point is specified in the authorizationConfig, the\n\t\t\t\troot directory parameter must either be omitted or set to / which will\n\t\t\t\tenforce the path set on the EFS access point.

\n
" } }, "transitEncryption": { "target": "com.amazonaws.ecs#EFSTransitEncryption", "traits": { - "smithy.api#documentation": "

Determines whether to use encryption for Amazon EFS data in transit between the Amazon ECS host and the Amazon EFS\n\t\t\tserver. Transit encryption must be turned on if Amazon EFS IAM authorization is used. If this parameter is\n\t\t\tomitted, the default value of DISABLED is used. For more information, see Encrypting data in\n\t\t\t\ttransit in the Amazon Elastic File System User Guide.

" + "smithy.api#documentation": "

Determines whether to use encryption for Amazon EFS data in transit between the Amazon ECS host\n\t\t\tand the Amazon EFS server. Transit encryption must be turned on if Amazon EFS IAM authorization\n\t\t\tis used. If this parameter is omitted, the default value of DISABLED is\n\t\t\tused. For more information, see Encrypting data in transit in\n\t\t\tthe Amazon Elastic File System User Guide.

" } }, "transitEncryptionPort": { "target": "com.amazonaws.ecs#BoxedInteger", "traits": { - "smithy.api#documentation": "

The port to use when sending encrypted data between the Amazon ECS host and the Amazon EFS server. If you do\n\t\t\tnot specify a transit encryption port, it will use the port selection strategy that the Amazon EFS mount\n\t\t\thelper uses. For more information, see EFS mount helper in the Amazon Elastic File System User Guide.

" + "smithy.api#documentation": "

The port to use when sending encrypted data between the Amazon ECS host and the Amazon EFS\n\t\t\tserver. If you do not specify a transit encryption port, it will use the port selection\n\t\t\tstrategy that the Amazon EFS mount helper uses. For more information, see EFS mount\n\t\t\t\thelper in the Amazon Elastic File System User Guide.

" } }, "authorizationConfig": { @@ -6014,7 +6014,7 @@ } }, "traits": { - "smithy.api#documentation": "

This parameter is specified when you're using an Amazon Elastic File System file system for task storage. For more\n\t\t\tinformation, see Amazon EFS volumes in the Amazon Elastic Container Service Developer Guide.

" + "smithy.api#documentation": "

This parameter is specified when you're using an Amazon Elastic File System file system for task\n\t\t\tstorage. For more information, see Amazon EFS volumes in the\n\t\t\tAmazon Elastic Container Service Developer Guide.

" } }, "com.amazonaws.ecs#EnvironmentFile": { @@ -6023,20 +6023,20 @@ "value": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the Amazon S3 object containing the environment variable\n\t\t\tfile.

", + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the Amazon S3 object containing the environment\n\t\t\tvariable file.

", "smithy.api#required": {} } }, "type": { "target": "com.amazonaws.ecs#EnvironmentFileType", "traits": { - "smithy.api#documentation": "

The file type to use. Environment files are objects in Amazon S3. The only supported value is\n\t\t\t\ts3.

", + "smithy.api#documentation": "

The file type to use. Environment files are objects in Amazon S3. The only supported value\n\t\t\tis s3.

", "smithy.api#required": {} } } }, "traits": { - "smithy.api#documentation": "

A list of files containing the environment variables to pass to a container. You can specify up to\n\t\t\tten environment files. The file must have a .env file extension. Each line in an\n\t\t\tenvironment file should contain an environment variable in VARIABLE=VALUE format. Lines\n\t\t\tbeginning with # are treated as comments and are ignored.

\n

If there are environment variables specified using the environment parameter in a\n\t\t\tcontainer definition, they take precedence over the variables contained within an environment file. If\n\t\t\tmultiple environment files are specified that contain the same variable, they're processed from the top\n\t\t\tdown. We recommend that you use unique variable names. For more information, see Use a file to\n\t\t\t\tpass environment variables to a container in the Amazon Elastic Container Service Developer Guide.

\n

Environment variable files are objects in Amazon S3 and all Amazon S3 security considerations apply.

\n

You must use the following platforms for the Fargate launch type:

\n
    \n
  • \n

    Linux platform version 1.4.0 or later.

    \n
  • \n
  • \n

    Windows platform version 1.0.0 or later.

    \n
  • \n
\n

Consider the following when using the Fargate launch type:

\n
    \n
  • \n

    The file is handled like a native Docker env-file.

    \n
  • \n
  • \n

    There is no support for shell escape handling.

    \n
  • \n
  • \n

    The container entry point interperts the VARIABLE values.

    \n
  • \n
" + "smithy.api#documentation": "

A list of files containing the environment variables to pass to a container. You can\n\t\t\tspecify up to ten environment files. The file must have a .env file\n\t\t\textension. Each line in an environment file should contain an environment variable in\n\t\t\t\tVARIABLE=VALUE format. Lines beginning with # are treated\n\t\t\tas comments and are ignored.

\n

If there are environment variables specified using the environment\n\t\t\tparameter in a container definition, they take precedence over the variables contained\n\t\t\twithin an environment file. If multiple environment files are specified that contain the\n\t\t\tsame variable, they're processed from the top down. We recommend that you use unique\n\t\t\tvariable names. For more information, see Use a file to pass\n\t\t\t\tenvironment variables to a container in the Amazon Elastic Container Service Developer Guide.

\n

Environment variable files are objects in Amazon S3 and all Amazon S3 security considerations\n\t\t\tapply.

\n

You must use the following platforms for the Fargate launch type:

\n
    \n
  • \n

    Linux platform version 1.4.0 or later.

    \n
  • \n
  • \n

    Windows platform version 1.0.0 or later.

    \n
  • \n
\n

Consider the following when using the Fargate launch type:

\n
    \n
  • \n

    The file is handled like a native Docker env-file.

    \n
  • \n
  • \n

    There is no support for shell escape handling.

    \n
  • \n
  • \n

    The container entry point interprets the VARIABLE values.

    \n
  • \n
" } }, "com.amazonaws.ecs#EnvironmentFileType": { @@ -6069,13 +6069,13 @@ "target": "com.amazonaws.ecs#Integer", "traits": { "smithy.api#default": 0, - "smithy.api#documentation": "

The total amount, in GiB, of ephemeral storage to set for the task. The minimum supported\n\t\t\tvalue is 21 GiB and the maximum supported value is 200\n\t\t\tGiB.

", + "smithy.api#documentation": "

The total amount, in GiB, of ephemeral storage to set for the task. The minimum\n\t\t\tsupported value is 21 GiB and the maximum supported value is\n\t\t\t\t200 GiB.

", "smithy.api#required": {} } } }, "traits": { - "smithy.api#documentation": "

The amount of ephemeral storage to allocate for the task. This parameter is used to expand the total\n\t\t\tamount of ephemeral storage available, beyond the default amount, for tasks hosted on Fargate. For\n\t\t\tmore information, see Using data volumes in tasks\n\t\t\tin the Amazon ECS Developer Guide;.

\n \n

For tasks using the Fargate launch type, the task requires the following\n\t\t\t\tplatforms:

\n
    \n
  • \n

    Linux platform version 1.4.0 or later.

    \n
  • \n
  • \n

    Windows platform version 1.0.0 or later.

    \n
  • \n
\n
" + "smithy.api#documentation": "

The amount of ephemeral storage to allocate for the task. This parameter is used to\n\t\t\texpand the total amount of ephemeral storage available, beyond the default amount, for\n\t\t\ttasks hosted on Fargate. For more information, see Using data volumes in\n\t\t\t\ttasks in the Amazon ECS Developer Guide.

\n \n

For tasks using the Fargate launch type, the task requires the\n\t\t\t\tfollowing platforms:

\n
    \n
  • \n

    Linux platform version 1.4.0 or later.

    \n
  • \n
  • \n

    Windows platform version 1.0.0 or later.

    \n
  • \n
\n
" } }, "com.amazonaws.ecs#ExecuteCommand": { @@ -6107,7 +6107,7 @@ } ], "traits": { - "smithy.api#documentation": "

Runs a command remotely on a container within a task.

\n

If you use a condition key in your IAM policy to refine the conditions for the policy statement,\n\t\t\tfor example limit the actions to a specific cluster, you receive an AccessDeniedException\n\t\t\twhen there is a mismatch between the condition key value and the corresponding parameter value.

\n

For information about required permissions and considerations, see Using Amazon ECS Exec for debugging in the\n\t\t\t\tAmazon ECS Developer Guide.

" + "smithy.api#documentation": "

Runs a command remotely on a container within a task.

\n

If you use a condition key in your IAM policy to refine the conditions for the\n\t\t\tpolicy statement, for example limit the actions to a specific cluster, you receive an\n\t\t\t\tAccessDeniedException when there is a mismatch between the condition\n\t\t\tkey value and the corresponding parameter value.

\n

For information about required permissions and considerations, see Using Amazon ECS\n\t\t\t\tExec for debugging in the Amazon ECS Developer Guide.\n\t\t

" } }, "com.amazonaws.ecs#ExecuteCommandConfiguration": { @@ -6116,19 +6116,19 @@ "kmsKeyId": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

Specify an Key Management Service key ID to encrypt the data between the local client and the\n\t\t\tcontainer.

" + "smithy.api#documentation": "

Specify a Key Management Service key ID to encrypt the data between the local client\n\t\t\tand the container.

" } }, "logging": { "target": "com.amazonaws.ecs#ExecuteCommandLogging", "traits": { - "smithy.api#documentation": "

The log setting to use for redirecting logs for your execute command results. The following log\n\t\t\tsettings are available.

\n
    \n
  • \n

    \n NONE: The execute command session is not logged.

    \n
  • \n
  • \n

    \n DEFAULT: The awslogs configuration in the task definition is used.\n\t\t\t\t\tIf no logging parameter is specified, it defaults to this value. If no awslogs log\n\t\t\t\t\tdriver is configured in the task definition, the output won't be logged.

    \n
  • \n
  • \n

    \n OVERRIDE: Specify the logging details as a part of\n\t\t\t\t\tlogConfiguration. If the OVERRIDE logging option is specified, the\n\t\t\t\t\t\tlogConfiguration is required.

    \n
  • \n
" + "smithy.api#documentation": "

The log setting to use for redirecting logs for your execute command results. The\n\t\t\tfollowing log settings are available.

\n
    \n
  • \n

    \n NONE: The execute command session is not logged.

    \n
  • \n
  • \n

    \n DEFAULT: The awslogs configuration in the task\n\t\t\t\t\tdefinition is used. If no logging parameter is specified, it defaults to this\n\t\t\t\t\tvalue. If no awslogs log driver is configured in the task\n\t\t\t\t\tdefinition, the output won't be logged.

    \n
  • \n
  • \n

    \n OVERRIDE: Specify the logging details as a part of\n\t\t\t\t\t\tlogConfiguration. If the OVERRIDE logging option\n\t\t\t\t\tis specified, the logConfiguration is required.

    \n
  • \n
" } }, "logConfiguration": { "target": "com.amazonaws.ecs#ExecuteCommandLogConfiguration", "traits": { - "smithy.api#documentation": "

The log configuration for the results of the execute command actions. The logs can be sent to\n\t\t\tCloudWatch Logs or an Amazon S3 bucket. When logging=OVERRIDE is specified, a\n\t\t\t\tlogConfiguration must be provided.

" + "smithy.api#documentation": "

The log configuration for the results of the execute command actions. The logs can be\n\t\t\tsent to CloudWatch Logs or an Amazon S3 bucket. When logging=OVERRIDE is\n\t\t\tspecified, a logConfiguration must be provided.

" } } }, @@ -6149,7 +6149,7 @@ "target": "com.amazonaws.ecs#Boolean", "traits": { "smithy.api#default": false, - "smithy.api#documentation": "

Determines whether to use encryption on the CloudWatch logs. If not specified, encryption will be\n\t\t\toff.

" + "smithy.api#documentation": "

Determines whether to use encryption on the CloudWatch logs. If not specified,\n\t\t\tencryption will be off.

" } }, "s3BucketName": { @@ -6162,7 +6162,7 @@ "target": "com.amazonaws.ecs#Boolean", "traits": { "smithy.api#default": false, - "smithy.api#documentation": "

Determines whether to use encryption on the S3 logs. If not specified, encryption is not used.

" + "smithy.api#documentation": "

Determines whether to use encryption on the S3 logs. If not specified, encryption is\n\t\t\tnot used.

" } }, "s3KeyPrefix": { @@ -6173,7 +6173,7 @@ } }, "traits": { - "smithy.api#documentation": "

The log configuration for the results of the execute command actions. The logs can be sent to\n\t\t\tCloudWatch Logs or an Amazon S3 bucket.

" + "smithy.api#documentation": "

The log configuration for the results of the execute command actions. The logs can be\n\t\t\tsent to CloudWatch Logs or an Amazon S3 bucket.

" } }, "com.amazonaws.ecs#ExecuteCommandLogging": { @@ -6211,7 +6211,7 @@ "container": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

The name of the container to execute the command on. A container name only needs to be specified for\n\t\t\ttasks containing multiple containers.

" + "smithy.api#documentation": "

The name of the container to execute the command on. A container name only needs to be\n\t\t\tspecified for tasks containing multiple containers.

" } }, "command": { @@ -6266,13 +6266,13 @@ "target": "com.amazonaws.ecs#Boolean", "traits": { "smithy.api#default": false, - "smithy.api#documentation": "

Determines whether the execute command session is running in interactive mode. Amazon ECS only supports\n\t\t\tinitiating interactive sessions, so you must specify true for this value.

" + "smithy.api#documentation": "

Determines whether the execute command session is running in interactive mode. Amazon ECS\n\t\t\tonly supports initiating interactive sessions, so you must specify true for\n\t\t\tthis value.

" } }, "session": { "target": "com.amazonaws.ecs#Session", "traits": { - "smithy.api#documentation": "

The details of the SSM session that was created for this instance of execute-command.

" + "smithy.api#documentation": "

The details of the SSM session that was created for this instance of\n\t\t\texecute-command.

" } }, "taskArn": { @@ -6292,20 +6292,20 @@ "credentialsParameter": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

The authorization credential option to use. The authorization credential options can be provided\n\t\t\tusing either the Amazon Resource Name (ARN) of an Secrets Manager secret or SSM Parameter Store parameter. The ARN refers to\n\t\t\tthe stored credentials.

", + "smithy.api#documentation": "

The authorization credential option to use. The authorization credential options can\n\t\t\tbe provided using either the Amazon Resource Name (ARN) of an Secrets Manager secret or SSM Parameter Store\n\t\t\tparameter. The ARN refers to the stored credentials.

", "smithy.api#required": {} } }, "domain": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

A fully qualified domain name hosted by an Directory Service Managed\n\t\t\tMicrosoft AD (Active Directory) or self-hosted AD on Amazon EC2.

", + "smithy.api#documentation": "

A fully qualified domain name hosted by a Directory Service Managed Microsoft AD (Active Directory) or self-hosted AD on\n\t\t\tAmazon EC2.

", "smithy.api#required": {} } } }, "traits": { - "smithy.api#documentation": "

The authorization configuration details for Amazon FSx for Windows File Server file system. See FSxWindowsFileServerVolumeConfiguration in the Amazon ECS API\n\t\t\tReference.

\n

For more information and the input format, see Amazon FSx for Windows File Server Volumes in the\n\t\t\tAmazon Elastic Container Service Developer Guide.

" + "smithy.api#documentation": "

The authorization configuration details for Amazon FSx for Windows File Server file system. See FSxWindowsFileServerVolumeConfiguration in the Amazon ECS API\n\t\t\t\tReference.

\n

For more information and the input format, see Amazon FSx for Windows File Server Volumes\n\t\t\tin the Amazon Elastic Container Service Developer Guide.

" } }, "com.amazonaws.ecs#FSxWindowsFileServerVolumeConfiguration": { @@ -6321,7 +6321,7 @@ "rootDirectory": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

The directory within the Amazon FSx for Windows File Server file system to mount as the root directory inside the\n\t\t\thost.

", + "smithy.api#documentation": "

The directory within the Amazon FSx for Windows File Server file system to mount as the root directory\n\t\t\tinside the host.

", "smithy.api#required": {} } }, @@ -6334,7 +6334,7 @@ } }, "traits": { - "smithy.api#documentation": "

This parameter is specified when you're using Amazon FSx for Windows File Server file system for task\n\t\t\tstorage.

\n

For more information and the input format, see Amazon FSx for Windows File Server volumes in the\n\t\t\tAmazon Elastic Container Service Developer Guide.

" + "smithy.api#documentation": "

This parameter is specified when you're using Amazon FSx for Windows File Server file system for task\n\t\t\tstorage.

\n

For more information and the input format, see Amazon FSx for Windows File Server volumes\n\t\t\tin the Amazon Elastic Container Service Developer Guide.

" } }, "com.amazonaws.ecs#Failure": { @@ -6360,7 +6360,7 @@ } }, "traits": { - "smithy.api#documentation": "

A failed resource. For a list of common causes, see API failure reasons in the\n\t\t\tAmazon Elastic Container Service Developer Guide.

" + "smithy.api#documentation": "

A failed resource. For a list of common causes, see API failure\n\t\t\t\treasons in the Amazon Elastic Container Service Developer Guide.

" } }, "com.amazonaws.ecs#Failures": { @@ -6375,19 +6375,19 @@ "type": { "target": "com.amazonaws.ecs#FirelensConfigurationType", "traits": { - "smithy.api#documentation": "

The log router to use. The valid values are fluentd or fluentbit.

", + "smithy.api#documentation": "

The log router to use. The valid values are fluentd or\n\t\t\t\tfluentbit.

", "smithy.api#required": {} } }, "options": { "target": "com.amazonaws.ecs#FirelensConfigurationOptionsMap", "traits": { - "smithy.api#documentation": "

The options to use when configuring the log router. This field is optional and can be used to specify\n\t\t\ta custom configuration file or to add additional metadata, such as the task, task definition, cluster,\n\t\t\tand container instance details to the log event. If specified, the syntax to use is\n\t\t\t\t\"options\":{\"enable-ecs-log-metadata\":\"true|false\",\"config-file-type:\"s3|file\",\"config-file-value\":\"arn:aws:s3:::mybucket/fluent.conf|filepath\"}.\n\t\t\tFor more information, see Creating a task\n\t\t\t\tdefinition that uses a FireLens configuration in the Amazon Elastic Container Service Developer Guide.

\n \n

Tasks hosted on Fargate only support the file configuration file type.

\n
" + "smithy.api#documentation": "

The options to use when configuring the log router. This field is optional and can be\n\t\t\tused to specify a custom configuration file or to add additional metadata, such as the\n\t\t\ttask, task definition, cluster, and container instance details to the log event. If\n\t\t\tspecified, the syntax to use is\n\t\t\t\t\"options\":{\"enable-ecs-log-metadata\":\"true|false\",\"config-file-type:\"s3|file\",\"config-file-value\":\"arn:aws:s3:::mybucket/fluent.conf|filepath\"}.\n\t\t\tFor more information, see Creating\n\t\t\t\ta task definition that uses a FireLens configuration in the\n\t\t\tAmazon Elastic Container Service Developer Guide.

\n \n

Tasks hosted on Fargate only support the file configuration file\n\t\t\t\ttype.

\n
" } } }, "traits": { - "smithy.api#documentation": "

The FireLens configuration for the container. This is used to specify and configure a log router for\n\t\t\tcontainer logs. For more information, see Custom log routing in the\n\t\t\tAmazon Elastic Container Service Developer Guide.

" + "smithy.api#documentation": "

The FireLens configuration for the container. This is used to specify and configure a\n\t\t\tlog router for container logs. For more information, see Custom log routing\n\t\t\tin the Amazon Elastic Container Service Developer Guide.

" } }, "com.amazonaws.ecs#FirelensConfigurationOptionsMap": { @@ -6479,7 +6479,7 @@ "cluster": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

The short name or full Amazon Resource Name (ARN) of the cluster that hosts the service that the task sets exist\n\t\t\tin.

", + "smithy.api#documentation": "

The short name or full Amazon Resource Name (ARN) of the cluster that hosts the service that the task\n\t\t\tsets exist in.

", "smithy.api#required": {} } }, @@ -6500,7 +6500,7 @@ "protectedTasks": { "target": "com.amazonaws.ecs#ProtectedTasks", "traits": { - "smithy.api#documentation": "

A list of tasks with the following information.

\n
    \n
  • \n

    \n taskArn: The task ARN.

    \n
  • \n
  • \n

    \n protectionEnabled: The protection status of the task. If scale-in protection is\n\t\t\t\t\tturned on for a task, the value is true. Otherwise, it is\n\t\t\t\t\tfalse.

    \n
  • \n
  • \n

    \n expirationDate: The epoch time when protection for the task will expire.

    \n
  • \n
" + "smithy.api#documentation": "

A list of tasks with the following information.

\n
    \n
  • \n

    \n taskArn: The task ARN.

    \n
  • \n
  • \n

    \n protectionEnabled: The protection status of the task. If scale-in\n\t\t\t\t\tprotection is turned on for a task, the value is true. Otherwise,\n\t\t\t\t\tit is false.

    \n
  • \n
  • \n

    \n expirationDate: The epoch time when protection for the task will\n\t\t\t\t\texpire.

    \n
  • \n
" } }, "failures": { @@ -6526,37 +6526,37 @@ "command": { "target": "com.amazonaws.ecs#StringList", "traits": { - "smithy.api#documentation": "

A string array representing the command that the container runs to determine if it is healthy. The\n\t\t\tstring array must start with CMD to run the command arguments directly, or\n\t\t\t\tCMD-SHELL to run the command with the container's default shell.

\n

When you use the Amazon Web Services Management Console JSON panel, the Command Line Interface, or the APIs, enclose the list of commands in\n\t\t\tdouble quotes and brackets.

\n

\n [ \"CMD-SHELL\", \"curl -f http://localhost/ || exit 1\" ]\n

\n

You don't include the double quotes and brackets when you use the Amazon Web Services Management Console.

\n

\n CMD-SHELL, curl -f http://localhost/ || exit 1\n

\n

An exit code of 0 indicates success, and non-zero exit code indicates failure. For\n\t\t\tmore information, see HealthCheck in the docker container create command.

", + "smithy.api#documentation": "

A string array representing the command that the container runs to determine if it is\n\t\t\thealthy. The string array must start with CMD to run the command arguments\n\t\t\tdirectly, or CMD-SHELL to run the command with the container's default\n\t\t\tshell.

\n

When you use the Amazon Web Services Management Console JSON panel, the Command Line Interface, or the APIs, enclose the list\n\t\t\tof commands in double quotes and brackets.

\n

\n [ \"CMD-SHELL\", \"curl -f http://localhost/ || exit 1\" ]\n

\n

You don't include the double quotes and brackets when you use the Amazon Web Services Management Console.

\n

\n CMD-SHELL, curl -f http://localhost/ || exit 1\n

\n

An exit code of 0 indicates success, and a non-zero exit code indicates failure. For\n\t\t\tmore information, see HealthCheck in the docker container create\n\t\t\tcommand.

", "smithy.api#required": {} } }, "interval": { "target": "com.amazonaws.ecs#BoxedInteger", "traits": { - "smithy.api#documentation": "

The time period in seconds between each health check execution. You may specify between 5 and 300\n\t\t\tseconds. The default value is 30 seconds.

" + "smithy.api#documentation": "

The time period in seconds between each health check execution. You may specify\n\t\t\tbetween 5 and 300 seconds. The default value is 30 seconds.

" } }, "timeout": { "target": "com.amazonaws.ecs#BoxedInteger", "traits": { - "smithy.api#documentation": "

The time period in seconds to wait for a health check to succeed before it is considered a failure.\n\t\t\tYou may specify between 2 and 60 seconds. The default value is 5.

" + "smithy.api#documentation": "

The time period in seconds to wait for a health check to succeed before it is\n\t\t\tconsidered a failure. You may specify between 2 and 60 seconds. The default value is\n\t\t\t5.

" } }, "retries": { "target": "com.amazonaws.ecs#BoxedInteger", "traits": { - "smithy.api#documentation": "

The number of times to retry a failed health check before the container is considered unhealthy. You\n\t\t\tmay specify between 1 and 10 retries. The default value is 3.

" + "smithy.api#documentation": "

The number of times to retry a failed health check before the container is considered\n\t\t\tunhealthy. You may specify between 1 and 10 retries. The default value is 3.

" } }, "startPeriod": { "target": "com.amazonaws.ecs#BoxedInteger", "traits": { - "smithy.api#documentation": "

The optional grace period to provide containers time to bootstrap before failed health checks count\n\t\t\ttowards the maximum number of retries. You can specify between 0 and 300 seconds. By default, the\n\t\t\t\tstartPeriod is off.

\n \n

If a health check succeeds within the startPeriod, then the container is considered\n\t\t\t\thealthy and any subsequent failures count toward the maximum number of retries.

\n
" + "smithy.api#documentation": "

The optional grace period to provide containers time to bootstrap before failed health\n\t\t\tchecks count towards the maximum number of retries. You can specify between 0 and 300\n\t\t\tseconds. By default, the startPeriod is off.

\n \n

If a health check succeeds within the startPeriod, then the container\n\t\t\t\tis considered healthy and any subsequent failures count toward the maximum number of\n\t\t\t\tretries.

\n
" } } }, "traits": { - "smithy.api#documentation": "

An object representing a container health check. Health check parameters that are specified in a\n\t\t\tcontainer definition override any Docker health checks that exist in the container image (such as those\n\t\t\tspecified in a parent image or from the image's Dockerfile). This configuration maps to the\n\t\t\t\tHEALTHCHECK parameter of docker run.

\n \n

The Amazon ECS container agent only monitors and reports on the health checks specified in the task\n\t\t\t\tdefinition. Amazon ECS does not monitor Docker health checks that are embedded in a container image and\n\t\t\t\tnot specified in the container definition. Health check parameters that are specified in a\n\t\t\t\tcontainer definition override any Docker health checks that exist in the container image.

\n
\n

You can view the health status of both individual containers and a task with the DescribeTasks API\n\t\t\toperation or when viewing the task details in the console.

\n

The health check is designed to make sure that your containers survive agent restarts, upgrades, or\n\t\t\ttemporary unavailability.

\n

Amazon ECS performs health checks on containers with the default that launched the container instance or\n\t\t\tthe task.

\n

The following describes the possible healthStatus values for a container:

\n
    \n
  • \n

    \n HEALTHY-The container health check has passed successfully.

    \n
  • \n
  • \n

    \n UNHEALTHY-The container health check has failed.

    \n
  • \n
  • \n

    \n UNKNOWN-The container health check is being evaluated, there's no\n\t\t\t\t\tcontainer health check defined, or Amazon ECS doesn't have the health status of the\n\t\t\t\t\tcontainer.

    \n
  • \n
\n

The following describes the possible healthStatus values based on the container health\n\t\t\tchecker status of essential containers in the task with the following priority order (high to\n\t\t\tlow):

\n
    \n
  • \n

    \n UNHEALTHY-One or more essential containers have failed their health\n\t\t\t\t\tcheck.

    \n
  • \n
  • \n

    \n UNKNOWN-Any essential container running within the task is in an\n\t\t\t\t\t\tUNKNOWN state and no other essential containers have an UNHEALTHY\n\t\t\t\t\tstate.

    \n
  • \n
  • \n

    \n HEALTHY-All essential containers within the task have passed their health\n\t\t\t\t\tchecks.

    \n
  • \n
\n

Consider the following task health example with 2 containers.

\n
    \n
  • \n

    If Container1 is UNHEALTHY and Container2 is UNKNOWN, the task\n\t\t\t\t\thealth is UNHEALTHY.

    \n
  • \n
  • \n

    If Container1 is UNHEALTHY and Container2 is HEALTHY, the task\n\t\t\t\t\thealth is UNHEALTHY.

    \n
  • \n
  • \n

    If Container1 is HEALTHY and Container2 is UNKNOWN, the task health\n\t\t\t\t\tis UNKNOWN.

    \n
  • \n
  • \n

    If Container1 is HEALTHY and Container2 is HEALTHY, the task health\n\t\t\t\t\tis HEALTHY.

    \n
  • \n
\n

Consider the following task health example with 3 containers.

\n
    \n
  • \n

    If Container1 is UNHEALTHY and Container2 is UNKNOWN, and\n\t\t\t\t\tContainer3 is UNKNOWN, the task health is UNHEALTHY.

    \n
  • \n
  • \n

    If Container1 is UNHEALTHY and Container2 is UNKNOWN, and\n\t\t\t\t\tContainer3 is HEALTHY, the task health is UNHEALTHY.

    \n
  • \n
  • \n

    If Container1 is UNHEALTHY and Container2 is HEALTHY, and\n\t\t\t\t\tContainer3 is HEALTHY, the task health is UNHEALTHY.

    \n
  • \n
  • \n

    If Container1 is HEALTHY and Container2 is UNKNOWN, and Container3\n\t\t\t\t\tis HEALTHY, the task health is UNKNOWN.

    \n
  • \n
  • \n

    If Container1 is HEALTHY and Container2 is UNKNOWN, and Container3\n\t\t\t\t\tis UNKNOWN, the task health is UNKNOWN.

    \n
  • \n
  • \n

    If Container1 is HEALTHY and Container2 is HEALTHY, and Container3\n\t\t\t\t\tis HEALTHY, the task health is HEALTHY.

    \n
  • \n
\n

If a task is run manually, and not as part of a service, the task will continue its lifecycle\n\t\t\tregardless of its health status. For tasks that are part of a service, if the task reports as unhealthy\n\t\t\tthen the task will be stopped and the service scheduler will replace it.

\n

The following are notes about container health check support:

\n
    \n
  • \n

    If the Amazon ECS container agent becomes disconnected from the Amazon ECS service, this won't cause a\n\t\t\t\t\tcontainer to transition to an UNHEALTHY status. This is by design, to ensure that\n\t\t\t\t\tcontainers remain running during agent restarts or temporary unavailability. The health check\n\t\t\t\t\tstatus is the \"last heard from\" response from the Amazon ECS agent, so if the container was\n\t\t\t\t\tconsidered HEALTHY prior to the disconnect, that status will remain until the\n\t\t\t\t\tagent reconnects and another health check occurs. There are no assumptions made about the\n\t\t\t\t\tstatus of the container health checks.

    \n
  • \n
  • \n

    Container health checks require version 1.17.0 or greater of the Amazon ECS container\n\t\t\t\t\tagent. For more information, see Updating the Amazon ECS container\n\t\t\t\t\t\tagent.

    \n
  • \n
  • \n

    Container health checks are supported for Fargate tasks if you're using\n\t\t\t\t\tplatform version 1.1.0 or greater. For more information, see Fargate\n\t\t\t\t\t\tplatform versions.

    \n
  • \n
  • \n

    Container health checks aren't supported for tasks that are part of a service that's\n\t\t\t\t\tconfigured to use a Classic Load Balancer.

    \n
  • \n
" + "smithy.api#documentation": "

An object representing a container health check. Health check parameters that are\n\t\t\tspecified in a container definition override any Docker health checks that exist in the\n\t\t\tcontainer image (such as those specified in a parent image or from the image's\n\t\t\tDockerfile). This configuration maps to the HEALTHCHECK parameter of docker\n\t\t\trun.

\n \n

The Amazon ECS container agent only monitors and reports on the health checks specified\n\t\t\t\tin the task definition. Amazon ECS does not monitor Docker health checks that are\n\t\t\t\tembedded in a container image and not specified in the container definition. Health\n\t\t\t\tcheck parameters that are specified in a container definition override any Docker\n\t\t\t\thealth checks that exist in the container image.

\n
\n

You can view the health status of both individual containers and a task with the\n\t\t\tDescribeTasks API operation or when viewing the task details in the console.

\n

The health check is designed to make sure that your containers survive agent restarts,\n\t\t\tupgrades, or temporary unavailability.

\n

Amazon ECS performs health checks on containers with the default that launched the\n\t\t\tcontainer instance or the task.

\n

The following describes the possible healthStatus values for a\n\t\t\tcontainer:

\n
    \n
  • \n

    \n HEALTHY-The container health check has passed\n\t\t\t\t\tsuccessfully.

    \n
  • \n
  • \n

    \n UNHEALTHY-The container health check has failed.

    \n
  • \n
  • \n

    \n UNKNOWN-The container health check is being evaluated,\n\t\t\t\t\tthere's no container health check defined, or Amazon ECS doesn't have the health\n\t\t\t\t\tstatus of the container.

    \n
  • \n
\n

The following describes the possible healthStatus values based on the\n\t\t\tcontainer health checker status of essential containers in the task with the following\n\t\t\tpriority order (high to low):

\n
    \n
  • \n

    \n UNHEALTHY-One or more essential containers have failed\n\t\t\t\t\ttheir health check.

    \n
  • \n
  • \n

    \n UNKNOWN-Any essential container running within the task is\n\t\t\t\t\tin an UNKNOWN state and no other essential containers have an\n\t\t\t\t\t\tUNHEALTHY state.

    \n
  • \n
  • \n

    \n HEALTHY-All essential containers within the task have\n\t\t\t\t\tpassed their health checks.

    \n
  • \n
\n

Consider the following task health example with 2 containers.

\n
    \n
  • \n

    If Container1 is UNHEALTHY and Container2 is\n\t\t\t\t\tUNKNOWN, the task health is UNHEALTHY.

    \n
  • \n
  • \n

    If Container1 is UNHEALTHY and Container2 is\n\t\t\t\t\tHEALTHY, the task health is UNHEALTHY.

    \n
  • \n
  • \n

    If Container1 is HEALTHY and Container2 is UNKNOWN,\n\t\t\t\t\tthe task health is UNKNOWN.

    \n
  • \n
  • \n

    If Container1 is HEALTHY and Container2 is HEALTHY,\n\t\t\t\t\tthe task health is HEALTHY.

    \n
  • \n
\n

Consider the following task health example with 3 containers.

\n
    \n
  • \n

    If Container1 is UNHEALTHY and Container2 is\n\t\t\t\t\tUNKNOWN, and Container3 is UNKNOWN, the task health is\n\t\t\t\t\t\tUNHEALTHY.

    \n
  • \n
  • \n

    If Container1 is UNHEALTHY and Container2 is\n\t\t\t\t\tUNKNOWN, and Container3 is HEALTHY, the task health is\n\t\t\t\t\t\tUNHEALTHY.

    \n
  • \n
  • \n

    If Container1 is UNHEALTHY and Container2 is\n\t\t\t\t\tHEALTHY, and Container3 is HEALTHY, the task health is\n\t\t\t\t\t\tUNHEALTHY.

    \n
  • \n
  • \n

    If Container1 is HEALTHY and Container2 is UNKNOWN,\n\t\t\t\t\tand Container3 is HEALTHY, the task health is\n\t\t\t\t\tUNKNOWN.

    \n
  • \n
  • \n

    If Container1 is HEALTHY and Container2 is UNKNOWN,\n\t\t\t\t\tand Container3 is UNKNOWN, the task health is\n\t\t\t\t\tUNKNOWN.

    \n
  • \n
  • \n

    If Container1 is HEALTHY and Container2 is HEALTHY,\n\t\t\t\t\tand Container3 is HEALTHY, the task health is\n\t\t\t\t\tHEALTHY.

    \n
  • \n
\n

If a task is run manually, and not as part of a service, the task will continue its\n\t\t\tlifecycle regardless of its health status. For tasks that are part of a service, if the\n\t\t\ttask reports as unhealthy then the task will be stopped and the service scheduler will\n\t\t\treplace it.

\n

The following are notes about container health check support:

\n
    \n
  • \n

    If the Amazon ECS container agent becomes disconnected from the Amazon ECS service, this\n\t\t\t\t\twon't cause a container to transition to an UNHEALTHY status. This\n\t\t\t\t\tis by design, to ensure that containers remain running during agent restarts or\n\t\t\t\t\ttemporary unavailability. The health check status is the \"last heard from\"\n\t\t\t\t\tresponse from the Amazon ECS agent, so if the container was considered\n\t\t\t\t\t\tHEALTHY prior to the disconnect, that status will remain until\n\t\t\t\t\tthe agent reconnects and another health check occurs. There are no assumptions\n\t\t\t\t\tmade about the status of the container health checks.

    \n
  • \n
  • \n

    Container health checks require version 1.17.0 or greater of the\n\t\t\t\t\tAmazon ECS container agent. For more information, see Updating the\n\t\t\t\t\t\tAmazon ECS container agent.

    \n
  • \n
  • \n

    Container health checks are supported for Fargate tasks if\n\t\t\t\t\tyou're using platform version 1.1.0 or greater. For more\n\t\t\t\t\tinformation, see Fargate\n\t\t\t\t\t\tplatform versions.

    \n
  • \n
  • \n

    Container health checks aren't supported for tasks that are part of a service\n\t\t\t\t\tthat's configured to use a Classic Load Balancer.

    \n
  • \n
" } }, "com.amazonaws.ecs#HealthStatus": { @@ -6601,7 +6601,7 @@ } }, "traits": { - "smithy.api#documentation": "

Hostnames and IP address entries that are added to the /etc/hosts file of a container\n\t\t\tvia the extraHosts parameter of its ContainerDefinition.\n\t\t

" + "smithy.api#documentation": "

Hostnames and IP address entries that are added to the /etc/hosts file of\n\t\t\ta container via the extraHosts parameter of its ContainerDefinition.

" } }, "com.amazonaws.ecs#HostEntryList": { @@ -6616,7 +6616,7 @@ "sourcePath": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

When the host parameter is used, specify a sourcePath to declare the path\n\t\t\ton the host container instance that's presented to the container. If this parameter is empty, then the\n\t\t\tDocker daemon has assigned a host path for you. If the host parameter contains a\n\t\t\t\tsourcePath file location, then the data volume persists at the specified location on\n\t\t\tthe host container instance until you delete it manually. If the sourcePath value doesn't\n\t\t\texist on the host container instance, the Docker daemon creates it. If the location does exist, the\n\t\t\tcontents of the source path folder are exported.

\n

If you're using the Fargate launch type, the sourcePath parameter is not\n\t\t\tsupported.

" + "smithy.api#documentation": "

When the host parameter is used, specify a sourcePath to\n\t\t\tdeclare the path on the host container instance that's presented to the container. If\n\t\t\tthis parameter is empty, then the Docker daemon has assigned a host path for you. If the\n\t\t\t\thost parameter contains a sourcePath file location, then\n\t\t\tthe data volume persists at the specified location on the host container instance until\n\t\t\tyou delete it manually. If the sourcePath value doesn't exist on the host\n\t\t\tcontainer instance, the Docker daemon creates it. If the location does exist, the\n\t\t\tcontents of the source path folder are exported.

\n

If you're using the Fargate launch type, the sourcePath\n\t\t\tparameter is not supported.

" } } }, @@ -6633,7 +6633,7 @@ "deviceName": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

The Elastic Inference accelerator device name. The deviceName must also be referenced in\n\t\t\ta container definition as a ResourceRequirement.

", + "smithy.api#documentation": "

The Elastic Inference accelerator device name. The deviceName must also\n\t\t\tbe referenced in a container definition as a ResourceRequirement.

", "smithy.api#required": {} } }, @@ -6646,7 +6646,7 @@ } }, "traits": { - "smithy.api#documentation": "

Details on an Elastic Inference accelerator. For more information, see Working with Amazon Elastic Inference on\n\t\t\t\tAmazon ECS in the Amazon Elastic Container Service Developer Guide.

" + "smithy.api#documentation": "

Details on an Elastic Inference accelerator. For more information, see Working with Amazon Elastic Inference on Amazon ECS in the\n\t\t\tAmazon Elastic Container Service Developer Guide.

" } }, "com.amazonaws.ecs#InferenceAcceleratorOverride": { @@ -6655,7 +6655,7 @@ "deviceName": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

The Elastic Inference accelerator device name to override for the task. This parameter must match a\n\t\t\t\tdeviceName specified in the task definition.

" + "smithy.api#documentation": "

The Elastic Inference accelerator device name to override for the task. This parameter\n\t\t\tmust match a deviceName specified in the task definition.

" } }, "deviceType": { @@ -6666,7 +6666,7 @@ } }, "traits": { - "smithy.api#documentation": "

Details on an Elastic Inference accelerator task override. This parameter is used to override the\n\t\t\tElastic Inference accelerator specified in the task definition. For more information, see Working with Amazon Elastic Inference on Amazon ECS in the\n\t\t\tAmazon Elastic Container Service Developer Guide.

" + "smithy.api#documentation": "

Details on an Elastic Inference accelerator task override. This parameter is used to\n\t\t\toverride the Elastic Inference accelerator specified in the task definition. For more\n\t\t\tinformation, see Working with Amazon\n\t\t\t\tElastic Inference on Amazon ECS in the Amazon Elastic Container Service Developer Guide.

" } }, "com.amazonaws.ecs#InferenceAcceleratorOverrides": { @@ -6699,7 +6699,7 @@ "lastUpdated": { "target": "com.amazonaws.ecs#Timestamp", "traits": { - "smithy.api#documentation": "

The Unix timestamp for when the container instance health status was last updated.

" + "smithy.api#documentation": "

The Unix timestamp for when the container instance health status was last\n\t\t\tupdated.

" } }, "lastStatusChange": { @@ -6782,7 +6782,7 @@ } }, "traits": { - "smithy.api#documentation": "

The specified parameter isn't valid. Review the available parameters for the API request.

", + "smithy.api#documentation": "

The specified parameter isn't valid. Review the available parameters for the API\n\t\t\trequest.

", "smithy.api#error": "client" } }, @@ -6815,18 +6815,18 @@ "add": { "target": "com.amazonaws.ecs#StringList", "traits": { - "smithy.api#documentation": "

The Linux capabilities for the container that have been added to the default configuration provided\n\t\t\tby Docker. This parameter maps to CapAdd in the docker container create command and the\n\t\t\t\t--cap-add option to docker run.

\n \n

Tasks launched on Fargate only support adding the SYS_PTRACE kernel\n\t\t\t\tcapability.

\n
\n

Valid values: \"ALL\" | \"AUDIT_CONTROL\" | \"AUDIT_WRITE\" | \"BLOCK_SUSPEND\" | \"CHOWN\" |\n\t\t\t\t\"DAC_OVERRIDE\" | \"DAC_READ_SEARCH\" | \"FOWNER\" | \"FSETID\" | \"IPC_LOCK\" | \"IPC_OWNER\" | \"KILL\" |\n\t\t\t\t\"LEASE\" | \"LINUX_IMMUTABLE\" | \"MAC_ADMIN\" | \"MAC_OVERRIDE\" | \"MKNOD\" | \"NET_ADMIN\" |\n\t\t\t\t\"NET_BIND_SERVICE\" | \"NET_BROADCAST\" | \"NET_RAW\" | \"SETFCAP\" | \"SETGID\" | \"SETPCAP\" | \"SETUID\" |\n\t\t\t\t\"SYS_ADMIN\" | \"SYS_BOOT\" | \"SYS_CHROOT\" | \"SYS_MODULE\" | \"SYS_NICE\" | \"SYS_PACCT\" | \"SYS_PTRACE\" |\n\t\t\t\t\"SYS_RAWIO\" | \"SYS_RESOURCE\" | \"SYS_TIME\" | \"SYS_TTY_CONFIG\" | \"SYSLOG\" |\n\t\t\t\"WAKE_ALARM\"\n

" + "smithy.api#documentation": "

The Linux capabilities for the container that have been added to the default\n\t\t\tconfiguration provided by Docker. This parameter maps to CapAdd in the\n\t\t\tdocker container create command and the --cap-add option to docker\n\t\t\trun.

\n \n

Tasks launched on Fargate only support adding the SYS_PTRACE kernel\n\t\t\t\tcapability.

\n
\n

Valid values: \"ALL\" | \"AUDIT_CONTROL\" | \"AUDIT_WRITE\" | \"BLOCK_SUSPEND\" |\n\t\t\t\t\"CHOWN\" | \"DAC_OVERRIDE\" | \"DAC_READ_SEARCH\" | \"FOWNER\" | \"FSETID\" | \"IPC_LOCK\" |\n\t\t\t\t\"IPC_OWNER\" | \"KILL\" | \"LEASE\" | \"LINUX_IMMUTABLE\" | \"MAC_ADMIN\" | \"MAC_OVERRIDE\" |\n\t\t\t\t\"MKNOD\" | \"NET_ADMIN\" | \"NET_BIND_SERVICE\" | \"NET_BROADCAST\" | \"NET_RAW\" | \"SETFCAP\"\n\t\t\t\t| \"SETGID\" | \"SETPCAP\" | \"SETUID\" | \"SYS_ADMIN\" | \"SYS_BOOT\" | \"SYS_CHROOT\" |\n\t\t\t\t\"SYS_MODULE\" | \"SYS_NICE\" | \"SYS_PACCT\" | \"SYS_PTRACE\" | \"SYS_RAWIO\" |\n\t\t\t\t\"SYS_RESOURCE\" | \"SYS_TIME\" | \"SYS_TTY_CONFIG\" | \"SYSLOG\" |\n\t\t\t\"WAKE_ALARM\"\n

" } }, "drop": { "target": "com.amazonaws.ecs#StringList", "traits": { - "smithy.api#documentation": "

The Linux capabilities for the container that have been removed from the default configuration\n\t\t\tprovided by Docker. This parameter maps to CapDrop in the docker container create command\n\t\t\tand the --cap-drop option to docker run.

\n

Valid values: \"ALL\" | \"AUDIT_CONTROL\" | \"AUDIT_WRITE\" | \"BLOCK_SUSPEND\" | \"CHOWN\" |\n\t\t\t\t\"DAC_OVERRIDE\" | \"DAC_READ_SEARCH\" | \"FOWNER\" | \"FSETID\" | \"IPC_LOCK\" | \"IPC_OWNER\" | \"KILL\" |\n\t\t\t\t\"LEASE\" | \"LINUX_IMMUTABLE\" | \"MAC_ADMIN\" | \"MAC_OVERRIDE\" | \"MKNOD\" | \"NET_ADMIN\" |\n\t\t\t\t\"NET_BIND_SERVICE\" | \"NET_BROADCAST\" | \"NET_RAW\" | \"SETFCAP\" | \"SETGID\" | \"SETPCAP\" | \"SETUID\" |\n\t\t\t\t\"SYS_ADMIN\" | \"SYS_BOOT\" | \"SYS_CHROOT\" | \"SYS_MODULE\" | \"SYS_NICE\" | \"SYS_PACCT\" | \"SYS_PTRACE\" |\n\t\t\t\t\"SYS_RAWIO\" | \"SYS_RESOURCE\" | \"SYS_TIME\" | \"SYS_TTY_CONFIG\" | \"SYSLOG\" |\n\t\t\t\"WAKE_ALARM\"\n

" + "smithy.api#documentation": "

The Linux capabilities for the container that have been removed from the default\n\t\t\tconfiguration provided by Docker. This parameter maps to CapDrop in the\n\t\t\tdocker container create command and the --cap-drop option to docker\n\t\t\trun.

\n

Valid values: \"ALL\" | \"AUDIT_CONTROL\" | \"AUDIT_WRITE\" | \"BLOCK_SUSPEND\" |\n\t\t\t\t\"CHOWN\" | \"DAC_OVERRIDE\" | \"DAC_READ_SEARCH\" | \"FOWNER\" | \"FSETID\" | \"IPC_LOCK\" |\n\t\t\t\t\"IPC_OWNER\" | \"KILL\" | \"LEASE\" | \"LINUX_IMMUTABLE\" | \"MAC_ADMIN\" | \"MAC_OVERRIDE\" |\n\t\t\t\t\"MKNOD\" | \"NET_ADMIN\" | \"NET_BIND_SERVICE\" | \"NET_BROADCAST\" | \"NET_RAW\" | \"SETFCAP\"\n\t\t\t\t| \"SETGID\" | \"SETPCAP\" | \"SETUID\" | \"SYS_ADMIN\" | \"SYS_BOOT\" | \"SYS_CHROOT\" |\n\t\t\t\t\"SYS_MODULE\" | \"SYS_NICE\" | \"SYS_PACCT\" | \"SYS_PTRACE\" | \"SYS_RAWIO\" |\n\t\t\t\t\"SYS_RESOURCE\" | \"SYS_TIME\" | \"SYS_TTY_CONFIG\" | \"SYSLOG\" |\n\t\t\t\"WAKE_ALARM\"\n

" } } }, "traits": { - "smithy.api#documentation": "

The Linux capabilities to add or remove from the default Docker configuration for a container defined\n\t\t\tin the task definition. For more detailed information about these Linux capabilities, see the capabilities(7) Linux manual\n\t\t\tpage.

" + "smithy.api#documentation": "

The Linux capabilities to add or remove from the default Docker configuration for a\n\t\t\tcontainer defined in the task definition. For more detailed information about these\n\t\t\tLinux capabilities, see the capabilities(7) Linux manual page.

" } }, "com.amazonaws.ecs#KeyValuePair": { @@ -6835,13 +6835,13 @@ "name": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

The name of the key-value pair. For environment variables, this is the name of the environment\n\t\t\tvariable.

" + "smithy.api#documentation": "

The name of the key-value pair. For environment variables, this is the name of the\n\t\t\tenvironment variable.

" } }, "value": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

The value of the key-value pair. For environment variables, this is the value of the environment\n\t\t\tvariable.

" + "smithy.api#documentation": "

The value of the key-value pair. For environment variables, this is the value of the\n\t\t\tenvironment variable.

" } } }, @@ -6893,43 +6893,43 @@ "capabilities": { "target": "com.amazonaws.ecs#KernelCapabilities", "traits": { - "smithy.api#documentation": "

The Linux capabilities for the container that are added to or dropped from the default configuration\n\t\t\tprovided by Docker.

\n \n

For tasks that use the Fargate launch type, capabilities is supported\n\t\t\t\tfor all platform versions but the add parameter is only supported if using platform\n\t\t\t\tversion 1.4.0 or later.

\n
" + "smithy.api#documentation": "

The Linux capabilities for the container that are added to or dropped from the default\n\t\t\tconfiguration provided by Docker.

\n \n

For tasks that use the Fargate launch type,\n\t\t\t\t\tcapabilities is supported for all platform versions but the\n\t\t\t\t\tadd parameter is only supported if using platform version 1.4.0 or\n\t\t\t\tlater.

\n
" } }, "devices": { "target": "com.amazonaws.ecs#DevicesList", "traits": { - "smithy.api#documentation": "

Any host devices to expose to the container. This parameter maps to Devices in the\n\t\t\tdocker container create command and the --device option to docker run.

\n \n

If you're using tasks that use the Fargate launch type, the devices\n\t\t\t\tparameter isn't supported.

\n
" + "smithy.api#documentation": "

Any host devices to expose to the container. This parameter maps to\n\t\t\t\tDevices in the docker container create command and the\n\t\t\t\t--device option to docker run.

\n \n

If you're using tasks that use the Fargate launch type, the\n\t\t\t\t\tdevices parameter isn't supported.

\n
" } }, "initProcessEnabled": { "target": "com.amazonaws.ecs#BoxedBoolean", "traits": { - "smithy.api#documentation": "

Run an init process inside the container that forwards signals and reaps processes. This\n\t\t\tparameter maps to the --init option to docker run. This parameter requires version 1.25 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: sudo docker version --format '{{.Server.APIVersion}}'\n

" + "smithy.api#documentation": "

Run an init process inside the container that forwards signals and reaps\n\t\t\tprocesses. This parameter maps to the --init option to docker run.\n\t\t\tThis parameter requires version 1.25 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: sudo docker version --format '{{.Server.APIVersion}}'\n

" } }, "sharedMemorySize": { "target": "com.amazonaws.ecs#BoxedInteger", "traits": { - "smithy.api#documentation": "

The value for the size (in MiB) of the /dev/shm volume. This parameter maps to the\n\t\t\t\t--shm-size option to docker run.

\n \n

If you are using tasks that use the Fargate launch type, the\n\t\t\t\t\tsharedMemorySize parameter is not supported.

\n
" + "smithy.api#documentation": "

The value for the size (in MiB) of the /dev/shm volume. This parameter\n\t\t\tmaps to the --shm-size option to docker run.

\n \n

If you are using tasks that use the Fargate launch type, the\n\t\t\t\t\tsharedMemorySize parameter is not supported.

\n
" } }, "tmpfs": { "target": "com.amazonaws.ecs#TmpfsList", "traits": { - "smithy.api#documentation": "

The container path, mount options, and size (in MiB) of the tmpfs mount. This parameter maps to the\n\t\t\t\t--tmpfs option to docker run.

\n \n

If you're using tasks that use the Fargate launch type, the tmpfs\n\t\t\t\tparameter isn't supported.

\n
" + "smithy.api#documentation": "

The container path, mount options, and size (in MiB) of the tmpfs mount. This\n\t\t\tparameter maps to the --tmpfs option to docker run.

\n \n

If you're using tasks that use the Fargate launch type, the\n\t\t\t\t\ttmpfs parameter isn't supported.

\n
" } }, "maxSwap": { "target": "com.amazonaws.ecs#BoxedInteger", "traits": { - "smithy.api#documentation": "

The total amount of swap memory (in MiB) a container can use. This parameter will be translated to\n\t\t\tthe --memory-swap option to docker run where the value would be the sum of the container\n\t\t\tmemory plus the maxSwap value.

\n

If a maxSwap value of 0 is specified, the container will not use swap.\n\t\t\tAccepted values are 0 or any positive integer. If the maxSwap parameter is\n\t\t\tomitted, the container will use the swap configuration for the container instance it is running on. A\n\t\t\t\tmaxSwap value must be set for the swappiness parameter to be used.

\n \n

If you're using tasks that use the Fargate launch type, the maxSwap\n\t\t\t\tparameter isn't supported.

\n

If you're using tasks on Amazon Linux 2023 the swappiness parameter isn't supported.

\n
" + "smithy.api#documentation": "

The total amount of swap memory (in MiB) a container can use. This parameter will be\n\t\t\ttranslated to the --memory-swap option to docker run where the value would\n\t\t\tbe the sum of the container memory plus the maxSwap value.

\n

If a maxSwap value of 0 is specified, the container will not\n\t\t\tuse swap. Accepted values are 0 or any positive integer. If the\n\t\t\t\tmaxSwap parameter is omitted, the container will use the swap\n\t\t\tconfiguration for the container instance it is running on. A maxSwap value\n\t\t\tmust be set for the swappiness parameter to be used.

\n \n

If you're using tasks that use the Fargate launch type, the\n\t\t\t\t\tmaxSwap parameter isn't supported.

\n

If you're using tasks on Amazon Linux 2023 the swappiness parameter isn't\n\t\t\t\tsupported.

\n
" } }, "swappiness": { "target": "com.amazonaws.ecs#BoxedInteger", "traits": { - "smithy.api#documentation": "

This allows you to tune a container's memory swappiness behavior. A swappiness value of\n\t\t\t\t0 will cause swapping to not happen unless absolutely necessary. A\n\t\t\t\tswappiness value of 100 will cause pages to be swapped very aggressively.\n\t\t\tAccepted values are whole numbers between 0 and 100. If the\n\t\t\t\tswappiness parameter is not specified, a default value of 60 is used. If\n\t\t\ta value is not specified for maxSwap then this parameter is ignored. This parameter maps\n\t\t\tto the --memory-swappiness option to docker run.

\n \n

If you're using tasks that use the Fargate launch type, the swappiness\n\t\t\t\tparameter isn't supported.

\n

If you're using tasks on Amazon Linux 2023 the swappiness parameter isn't supported.

\n
" + "smithy.api#documentation": "

This allows you to tune a container's memory swappiness behavior. A\n\t\t\t\tswappiness value of 0 will cause swapping to not happen\n\t\t\tunless absolutely necessary. A swappiness value of 100 will\n\t\t\tcause pages to be swapped very aggressively. Accepted values are whole numbers between\n\t\t\t\t0 and 100. If the swappiness parameter is not\n\t\t\tspecified, a default value of 60 is used. If a value is not specified for\n\t\t\t\tmaxSwap then this parameter is ignored. This parameter maps to the\n\t\t\t\t--memory-swappiness option to docker run.

\n \n

If you're using tasks that use the Fargate launch type, the\n\t\t\t\t\tswappiness parameter isn't supported.

\n

If you're using tasks on Amazon Linux 2023 the swappiness parameter isn't\n\t\t\t\tsupported.

\n
" } } }, @@ -7033,33 +7033,33 @@ "value": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

The value of the account settings to filter results with. You must also specify an account setting\n\t\t\tname to use this parameter.

" + "smithy.api#documentation": "

The value of the account settings to filter results with. You must also specify an\n\t\t\taccount setting name to use this parameter.

" } }, "principalArn": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

The ARN of the principal, which can be a user, role, or the root user. If this field is omitted, the\n\t\t\taccount settings are listed only for the authenticated user.

\n \n

Federated users assume the account setting of the root user and can't have explicit account settings\n\t\t\t\tset for them.

\n
" + "smithy.api#documentation": "

The ARN of the principal, which can be a user, role, or the root user. If this field is\n\t\t\tomitted, the account settings are listed only for the authenticated user.

\n \n

Federated users assume the account setting of the root user and can't have explicit\n\t\t\t\taccount settings set for them.

\n
" } }, "effectiveSettings": { "target": "com.amazonaws.ecs#Boolean", "traits": { "smithy.api#default": false, - "smithy.api#documentation": "

Determines whether to return the effective settings. If true, the account settings for\n\t\t\tthe root user or the default setting for the principalArn are returned. If\n\t\t\tfalse, the account settings for the principalArn are returned if they're set.\n\t\t\tOtherwise, no account settings are returned.

" + "smithy.api#documentation": "

Determines whether to return the effective settings. If true, the account\n\t\t\tsettings for the root user or the default setting for the principalArn are\n\t\t\treturned. If false, the account settings for the principalArn\n\t\t\tare returned if they're set. Otherwise, no account settings are returned.

" } }, "nextToken": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

The nextToken value returned from a ListAccountSettings request indicating\n\t\t\tthat more results are available to fulfill the request and further calls will be needed. If\n\t\t\t\tmaxResults was provided, it's possible the number of results to be fewer than\n\t\t\t\tmaxResults.

\n \n

This token should be treated as an opaque identifier that is only used to\n retrieve the next items in a list and not for other programmatic purposes.

\n
" + "smithy.api#documentation": "

The nextToken value returned from a ListAccountSettings\n\t\t\trequest indicating that more results are available to fulfill the request and further\n\t\t\tcalls will be needed. If maxResults was provided, it's possible the number\n\t\t\tof results to be fewer than maxResults.

\n \n

This token should be treated as an opaque identifier that is only used to\n retrieve the next items in a list and not for other programmatic purposes.

\n
" } }, "maxResults": { "target": "com.amazonaws.ecs#Integer", "traits": { "smithy.api#default": 0, - "smithy.api#documentation": "

The maximum number of account setting results returned by ListAccountSettings in\n\t\t\tpaginated output. When this parameter is used, ListAccountSettings only returns\n\t\t\t\tmaxResults results in a single page along with a nextToken response\n\t\t\telement. The remaining results of the initial request can be seen by sending another\n\t\t\t\tListAccountSettings request with the returned nextToken value. This value\n\t\t\tcan be between 1 and 10. If this\n\t\t\tparameter isn't used, then ListAccountSettings returns up to\n\t\t\t10 results and a nextToken value if\n\t\t\tapplicable.

" + "smithy.api#documentation": "

The maximum number of account setting results returned by\n\t\t\t\tListAccountSettings in paginated output. When this parameter is used,\n\t\t\t\tListAccountSettings only returns maxResults results in a\n\t\t\tsingle page along with a nextToken response element. The remaining results\n\t\t\tof the initial request can be seen by sending another ListAccountSettings\n\t\t\trequest with the returned nextToken value. This value can be between\n\t\t\t1 and 10. If this\n\t\t\tparameter isn't used, then ListAccountSettings returns up to\n\t\t\t10 results and a nextToken value\n\t\t\tif applicable.

" } } }, @@ -7079,7 +7079,7 @@ "nextToken": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

The nextToken value to include in a future ListAccountSettings request.\n\t\t\tWhen the results of a ListAccountSettings request exceed maxResults, this\n\t\t\tvalue can be used to retrieve the next page of results. This value is null when there are\n\t\t\tno more results to return.

" + "smithy.api#documentation": "

The nextToken value to include in a future\n\t\t\t\tListAccountSettings request. When the results of a\n\t\t\t\tListAccountSettings request exceed maxResults, this value\n\t\t\tcan be used to retrieve the next page of results. This value is null when\n\t\t\tthere are no more results to return.

" } } }, @@ -7104,7 +7104,7 @@ } ], "traits": { - "smithy.api#documentation": "

Lists the attributes for Amazon ECS resources within a specified target type and cluster. When you specify\n\t\t\ta target type and cluster, ListAttributes returns a list of attribute objects, one for\n\t\t\teach attribute on each resource. You can filter the list of results to a single attribute name to only\n\t\t\treturn results that have that name. You can also filter the results by attribute name and value. You\n\t\t\tcan do this, for example, to see which container instances in a cluster are running a Linux AMI\n\t\t\t\t(ecs.os-type=linux).

", + "smithy.api#documentation": "

Lists the attributes for Amazon ECS resources within a specified target type and cluster.\n\t\t\tWhen you specify a target type and cluster, ListAttributes returns a list\n\t\t\tof attribute objects, one for each attribute on each resource. You can filter the list\n\t\t\tof results to a single attribute name to only return results that have that name. You\n\t\t\tcan also filter the results by attribute name and value. You can do this, for example,\n\t\t\tto see which container instances in a cluster are running a Linux AMI\n\t\t\t\t(ecs.os-type=linux).

", "smithy.api#paginated": { "inputToken": "nextToken", "outputToken": "nextToken", @@ -7119,7 +7119,7 @@ "cluster": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

The short name or full Amazon Resource Name (ARN) of the cluster to list attributes. If you do not specify a cluster, the default cluster is assumed.

" + "smithy.api#documentation": "

The short name or full Amazon Resource Name (ARN) of the cluster to list attributes.\n\t\t\tIf you do not specify a cluster, the default cluster is assumed.

" } }, "targetType": { @@ -7138,19 +7138,19 @@ "attributeValue": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

The value of the attribute to filter results with. You must also specify an attribute name to use\n\t\t\tthis parameter.

" + "smithy.api#documentation": "

The value of the attribute to filter results with. You must also specify an attribute\n\t\t\tname to use this parameter.

" } }, "nextToken": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

The nextToken value returned from a ListAttributes request indicating that\n\t\t\tmore results are available to fulfill the request and further calls are needed. If\n\t\t\t\tmaxResults was provided, it's possible the number of results to be fewer than\n\t\t\t\tmaxResults.

\n \n

This token should be treated as an opaque identifier that is only used to\n retrieve the next items in a list and not for other programmatic purposes.

\n
" + "smithy.api#documentation": "

The nextToken value returned from a ListAttributes request\n\t\t\tindicating that more results are available to fulfill the request and further calls are\n\t\t\tneeded. If maxResults was provided, it's possible the number of results to\n\t\t\tbe fewer than maxResults.

\n \n

This token should be treated as an opaque identifier that is only used to\n retrieve the next items in a list and not for other programmatic purposes.

\n
" } }, "maxResults": { "target": "com.amazonaws.ecs#BoxedInteger", "traits": { - "smithy.api#documentation": "

The maximum number of cluster results that ListAttributes returned in paginated output.\n\t\t\tWhen this parameter is used, ListAttributes only returns maxResults results\n\t\t\tin a single page along with a nextToken response element. The remaining results of the\n\t\t\tinitial request can be seen by sending another ListAttributes request with the returned\n\t\t\t\tnextToken value. This value can be between 1 and 100. If\n\t\t\tthis parameter isn't used, then ListAttributes returns up to 100 results\n\t\t\tand a nextToken value if applicable.

" + "smithy.api#documentation": "

The maximum number of cluster results that ListAttributes returned in\n\t\t\tpaginated output. When this parameter is used, ListAttributes only returns\n\t\t\t\tmaxResults results in a single page along with a nextToken\n\t\t\tresponse element. The remaining results of the initial request can be seen by sending\n\t\t\tanother ListAttributes request with the returned nextToken\n\t\t\tvalue. This value can be between 1 and 100. If this\n\t\t\tparameter isn't used, then ListAttributes returns up to\n\t\t\t100 results and a nextToken value if applicable.

" } } }, @@ -7170,7 +7170,7 @@ "nextToken": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

The nextToken value to include in a future ListAttributes request. When the\n\t\t\tresults of a ListAttributes request exceed maxResults, this value can be used\n\t\t\tto retrieve the next page of results. This value is null when there are no more results to\n\t\t\treturn.

" + "smithy.api#documentation": "

The nextToken value to include in a future ListAttributes\n\t\t\trequest. When the results of a ListAttributes request exceed\n\t\t\t\tmaxResults, this value can be used to retrieve the next page of\n\t\t\tresults. This value is null when there are no more results to\n\t\t\treturn.

" } } }, @@ -7238,13 +7238,13 @@ "nextToken": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

The nextToken value returned from a ListClusters request indicating that\n\t\t\tmore results are available to fulfill the request and further calls are needed. If\n\t\t\t\tmaxResults was provided, it's possible the number of results to be fewer than\n\t\t\t\tmaxResults.

\n \n

This token should be treated as an opaque identifier that is only used to\n retrieve the next items in a list and not for other programmatic purposes.

\n
" + "smithy.api#documentation": "

The nextToken value returned from a ListClusters request\n\t\t\tindicating that more results are available to fulfill the request and further calls are\n\t\t\tneeded. If maxResults was provided, it's possible the number of results to\n\t\t\tbe fewer than maxResults.

\n \n

This token should be treated as an opaque identifier that is only used to\n retrieve the next items in a list and not for other programmatic purposes.

\n
" } }, "maxResults": { "target": "com.amazonaws.ecs#BoxedInteger", "traits": { - "smithy.api#documentation": "

The maximum number of cluster results that ListClusters returned in paginated output.\n\t\t\tWhen this parameter is used, ListClusters only returns maxResults results in\n\t\t\ta single page along with a nextToken response element. The remaining results of the\n\t\t\tinitial request can be seen by sending another ListClusters request with the returned\n\t\t\t\tnextToken value. This value can be between 1 and 100. If\n\t\t\tthis parameter isn't used, then ListClusters returns up to 100 results\n\t\t\tand a nextToken value if applicable.

" + "smithy.api#documentation": "

The maximum number of cluster results that ListClusters returned in\n\t\t\tpaginated output. When this parameter is used, ListClusters only returns\n\t\t\t\tmaxResults results in a single page along with a nextToken\n\t\t\tresponse element. The remaining results of the initial request can be seen by sending\n\t\t\tanother ListClusters request with the returned nextToken\n\t\t\tvalue. This value can be between 1 and 100. If this\n\t\t\tparameter isn't used, then ListClusters returns up to 100\n\t\t\tresults and a nextToken value if applicable.

" } } }, @@ -7258,13 +7258,13 @@ "clusterArns": { "target": "com.amazonaws.ecs#StringList", "traits": { - "smithy.api#documentation": "

The list of full Amazon Resource Name (ARN) entries for each cluster that's associated with your account.

" + "smithy.api#documentation": "

The list of full Amazon Resource Name (ARN) entries for each cluster that's associated with your\n\t\t\taccount.

" } }, "nextToken": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

The nextToken value to include in a future ListClusters request. When the\n\t\t\tresults of a ListClusters request exceed maxResults, this value can be used\n\t\t\tto retrieve the next page of results. This value is null when there are no more results to\n\t\t\treturn.

" + "smithy.api#documentation": "

The nextToken value to include in a future ListClusters\n\t\t\trequest. When the results of a ListClusters request exceed\n\t\t\t\tmaxResults, this value can be used to retrieve the next page of\n\t\t\tresults. This value is null when there are no more results to\n\t\t\treturn.

" } } }, @@ -7295,7 +7295,7 @@ } ], "traits": { - "smithy.api#documentation": "

Returns a list of container instances in a specified cluster. You can filter the results of a\n\t\t\t\tListContainerInstances operation with cluster query language statements inside the\n\t\t\t\tfilter parameter. For more information, see Cluster\n\t\t\t\tQuery Language in the Amazon Elastic Container Service Developer Guide.

", + "smithy.api#documentation": "

Returns a list of container instances in a specified cluster. You can filter the\n\t\t\tresults of a ListContainerInstances operation with cluster query language\n\t\t\tstatements inside the filter parameter. For more information, see Cluster Query Language in the Amazon Elastic Container Service Developer Guide.

", "smithy.api#examples": [ { "title": "To list your available container instances in a cluster", @@ -7325,31 +7325,31 @@ "cluster": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

The short name or full Amazon Resource Name (ARN) of the cluster that hosts the container instances to list.\n\t\t\tIf you do not specify a cluster, the default cluster is assumed.

" + "smithy.api#documentation": "

The short name or full Amazon Resource Name (ARN) of the cluster that hosts the container instances to\n\t\t\tlist. If you do not specify a cluster, the default cluster is assumed.

" } }, "filter": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

You can filter the results of a ListContainerInstances operation with cluster query\n\t\t\tlanguage statements. For more information, see Cluster\n\t\t\t\tQuery Language in the Amazon Elastic Container Service Developer Guide.

" + "smithy.api#documentation": "

You can filter the results of a ListContainerInstances operation with\n\t\t\tcluster query language statements. For more information, see Cluster Query Language in the Amazon Elastic Container Service Developer Guide.

" } }, "nextToken": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

The nextToken value returned from a ListContainerInstances request\n\t\t\tindicating that more results are available to fulfill the request and further calls are needed. If\n\t\t\t\tmaxResults was provided, it's possible the number of results to be fewer than\n\t\t\t\tmaxResults.

\n \n

This token should be treated as an opaque identifier that is only used to\n retrieve the next items in a list and not for other programmatic purposes.

\n
" + "smithy.api#documentation": "

The nextToken value returned from a ListContainerInstances\n\t\t\trequest indicating that more results are available to fulfill the request and further\n\t\t\tcalls are needed. If maxResults was provided, it's possible the number of\n\t\t\tresults to be fewer than maxResults.

\n \n

This token should be treated as an opaque identifier that is only used to\n retrieve the next items in a list and not for other programmatic purposes.

\n
" } }, "maxResults": { "target": "com.amazonaws.ecs#BoxedInteger", "traits": { - "smithy.api#documentation": "

The maximum number of container instance results that ListContainerInstances returned in\n\t\t\tpaginated output. When this parameter is used, ListContainerInstances only returns\n\t\t\t\tmaxResults results in a single page along with a nextToken response\n\t\t\telement. The remaining results of the initial request can be seen by sending another\n\t\t\t\tListContainerInstances request with the returned nextToken value. This\n\t\t\tvalue can be between 1 and 100. If this parameter isn't used, then\n\t\t\t\tListContainerInstances returns up to 100 results and a\n\t\t\t\tnextToken value if applicable.

" + "smithy.api#documentation": "

The maximum number of container instance results that\n\t\t\t\tListContainerInstances returned in paginated output. When this\n\t\t\tparameter is used, ListContainerInstances only returns\n\t\t\t\tmaxResults results in a single page along with a nextToken\n\t\t\tresponse element. The remaining results of the initial request can be seen by sending\n\t\t\tanother ListContainerInstances request with the returned\n\t\t\t\tnextToken value. This value can be between 1 and\n\t\t\t100. If this parameter isn't used, then\n\t\t\t\tListContainerInstances returns up to 100 results and\n\t\t\ta nextToken value if applicable.

" } }, "status": { "target": "com.amazonaws.ecs#ContainerInstanceStatus", "traits": { - "smithy.api#documentation": "

Filters the container instances by status. For example, if you specify the DRAINING\n\t\t\tstatus, the results include only container instances that have been set to DRAINING using\n\t\t\t\tUpdateContainerInstancesState. If you don't specify this parameter, the default is to\n\t\t\tinclude container instances set to all states other than INACTIVE.

" + "smithy.api#documentation": "

Filters the container instances by status. For example, if you specify the\n\t\t\t\tDRAINING status, the results include only container instances that have\n\t\t\tbeen set to DRAINING using UpdateContainerInstancesState. If you don't specify this parameter, the\n\t\t\tdefault is to include container instances set to all states other than\n\t\t\t\tINACTIVE.

" } } }, @@ -7363,13 +7363,13 @@ "containerInstanceArns": { "target": "com.amazonaws.ecs#StringList", "traits": { - "smithy.api#documentation": "

The list of container instances with full ARN entries for each container instance associated with\n\t\t\tthe specified cluster.

" + "smithy.api#documentation": "

The list of container instances with full ARN entries for each container instance\n\t\t\tassociated with the specified cluster.

" } }, "nextToken": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

The nextToken value to include in a future ListContainerInstances request.\n\t\t\tWhen the results of a ListContainerInstances request exceed maxResults, this\n\t\t\tvalue can be used to retrieve the next page of results. This value is null when there are\n\t\t\tno more results to return.

" + "smithy.api#documentation": "

The nextToken value to include in a future\n\t\t\t\tListContainerInstances request. When the results of a\n\t\t\t\tListContainerInstances request exceed maxResults, this\n\t\t\tvalue can be used to retrieve the next page of results. This value is null\n\t\t\twhen there are no more results to return.

" } } }, @@ -7406,7 +7406,7 @@ } ], "traits": { - "smithy.api#documentation": "

This operation lists all the service deployments that meet the specified filter criteria.

\n

A service deployment happens when you release a softwre update for the service. You\n\t\t\troute traffic from the running service revisions to the new service revison and control\n\t\t\tthe number of running tasks.

\n

This API returns the values that you use for the request parameters in DescribeServiceRevisions.

" + "smithy.api#documentation": "

This operation lists all the service deployments that meet the specified filter\n\t\t\tcriteria.

\n

A service deployment happens when you release a software update for the service. You\n\t\t\troute traffic from the running service revisions to the new service revision and control\n\t\t\tthe number of running tasks.

\n

This API returns the values that you use for the request parameters in DescribeServiceRevisions.

" } }, "com.amazonaws.ecs#ListServiceDeploymentsRequest": { @@ -7422,31 +7422,31 @@ "cluster": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

The cluster that hosts the service. This can either be the cluster name or ARN. Starting\n\t\t\tApril 15, 2023, Amazon Web Services will not onboard new customers to Amazon Elastic\n\t\t\tInference (EI), and will help current customers migrate their workloads to options that\n\t\t\toffer better price and performanceIf you don't specify a cluster, default\n\t\t\tis used.

" + "smithy.api#documentation": "

The cluster that hosts the service. This can either be the cluster name or ARN.\n\t\t\tStarting April 15, 2023, Amazon Web Services will not onboard new customers to Amazon\n\t\t\tElastic Inference (EI), and will help current customers migrate their workloads to\n\t\t\toptions that offer better price and performance. If you don't specify a cluster,\n\t\t\t\tdefault is used.

" } }, "status": { "target": "com.amazonaws.ecs#ServiceDeploymentStatusList", "traits": { - "smithy.api#documentation": "

An optional filter you can use to narrow the results. If you do not specify a status, then\n\t\t\tall status values are included in the result.

" + "smithy.api#documentation": "

An optional filter you can use to narrow the results. If you do not specify a status,\n\t\t\tthen all status values are included in the result.

" } }, "createdAt": { "target": "com.amazonaws.ecs#CreatedAt", "traits": { - "smithy.api#documentation": "

An optional filter you can use to narrow the results by the service creation date. If you do\n\t\t\tnot specify a value, the result includes all services created before the current\n\t\t\ttime. The\n\t\t\tformat is yyyy-MM-dd HH:mm:ss.SSSSSS.

" + "smithy.api#documentation": "

An optional filter you can use to narrow the results by the service creation date. If\n\t\t\tyou do not specify a value, the result includes all services created before the current\n\t\t\ttime. The format is yyyy-MM-dd HH:mm:ss.SSSSSS.

" } }, "nextToken": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

The nextToken value returned from a ListServiceDeployments request indicating that more results are available to fulfill the request and further calls are needed. If you provided maxResults, it's possible the number of results is fewer than maxResults.

" + "smithy.api#documentation": "

The nextToken value returned from a ListServiceDeployments\n\t\t\trequest indicating that more results are available to fulfill the request and further\n\t\t\tcalls are needed. If you provided maxResults, it's possible the number of\n\t\t\tresults is fewer than maxResults.

" } }, "maxResults": { "target": "com.amazonaws.ecs#BoxedInteger", "traits": { - "smithy.api#documentation": "

The maximum number of service deployment results that ListServiceDeployments\n\t\t\treturned in paginated output. When this parameter is used,\n\t\t\tListServiceDeployments only returns maxResults results in\n\t\t\ta single page along with a nextToken response element. The remaining\n\t\t\tresults of the initial request can be seen by sending another\n\t\t\tListServiceDeployments request with the returned nextToken\n\t\t\tvalue. This value can be between 1 and 100. If this parameter isn't used, then\n\t\t\tListServiceDeployments returns up to 20 results and a\n\t\t\tnextToken value if applicable.

" + "smithy.api#documentation": "

The maximum number of service deployment results that\n\t\t\t\tListServiceDeployments returned in paginated output. When this\n\t\t\tparameter is used, ListServiceDeployments only returns\n\t\t\t\tmaxResults results in a single page along with a nextToken\n\t\t\tresponse element. The remaining results of the initial request can be seen by sending\n\t\t\tanother ListServiceDeployments request with the returned\n\t\t\t\tnextToken value. This value can be between 1 and 100. If this parameter\n\t\t\tisn't used, then ListServiceDeployments returns up to 20 results and a\n\t\t\t\tnextToken value if applicable.

" } } }, @@ -7460,13 +7460,13 @@ "serviceDeployments": { "target": "com.amazonaws.ecs#ServiceDeploymentsBrief", "traits": { - "smithy.api#documentation": "

An overview of the service deployment, including the following\n\t\t\tproperties:

\n
    \n
  • \n

    The ARN of the service deployment.

    \n
  • \n
  • \n

    The ARN of the service being deployed.

    \n
  • \n
  • \n

    The ARN of the cluster that hosts the service in the service deployment.

    \n
  • \n
  • \n

    The time that the service deployment started.

    \n
  • \n
  • \n

    The time that the service deployment completed.

    \n
  • \n
  • \n

    The service deployment status.

    \n
  • \n
  • \n

    Information about why the service deployment is in the current state.

    \n
  • \n
  • \n

    The ARN of the service revision that is being deployed.

    \n
  • \n
" + "smithy.api#documentation": "

An overview of the service deployment, including the following properties:

\n
    \n
  • \n

    The ARN of the service deployment.

    \n
  • \n
  • \n

    The ARN of the service being deployed.

    \n
  • \n
  • \n

    The ARN of the cluster that hosts the service in the service\n\t\t\t\t\tdeployment.

    \n
  • \n
  • \n

    The time that the service deployment started.

    \n
  • \n
  • \n

    The time that the service deployment completed.

    \n
  • \n
  • \n

    The service deployment status.

    \n
  • \n
  • \n

    Information about why the service deployment is in the current state.

    \n
  • \n
  • \n

    The ARN of the service revision that is being deployed.

    \n
  • \n
" } }, "nextToken": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

The nextToken value to include in a future ListServiceDeployments request. When the results of a ListServiceDeployments request exceed maxResults, this value can be used to retrieve the next page of results. This value is null when there are no more results to return.

" + "smithy.api#documentation": "

The nextToken value to include in a future\n\t\t\t\tListServiceDeployments request. When the results of a\n\t\t\t\tListServiceDeployments request exceed maxResults, this\n\t\t\tvalue can be used to retrieve the next page of results. This value is null when there\n\t\t\tare no more results to return.

" } } }, @@ -7497,7 +7497,7 @@ } ], "traits": { - "smithy.api#documentation": "

Returns a list of services. You can filter the results by cluster, launch type, and scheduling\n\t\t\tstrategy.

", + "smithy.api#documentation": "

Returns a list of services. You can filter the results by cluster, launch type, and\n\t\t\tscheduling strategy.

", "smithy.api#examples": [ { "title": "To list the services in a cluster", @@ -7540,7 +7540,7 @@ } ], "traits": { - "smithy.api#documentation": "

This operation lists all of the services that are associated with a Cloud Map namespace. This list\n\t\t\tmight include services in different clusters. In contrast, ListServices can only list\n\t\t\tservices in one cluster at a time. If you need to filter the list of services in a single cluster by\n\t\t\tvarious parameters, use ListServices. For more information, see Service Connect in the Amazon Elastic Container Service Developer Guide.

", + "smithy.api#documentation": "

This operation lists all of the services that are associated with a Cloud Map\n\t\t\tnamespace. This list might include services in different clusters. In contrast,\n\t\t\t\tListServices can only list services in one cluster at a time. If you\n\t\t\tneed to filter the list of services in a single cluster by various parameters, use\n\t\t\t\tListServices. For more information, see Service Connect in the Amazon Elastic Container Service Developer Guide.

", "smithy.api#paginated": { "inputToken": "nextToken", "outputToken": "nextToken", @@ -7562,13 +7562,13 @@ "nextToken": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

The nextToken value that's returned from a ListServicesByNamespace request.\n\t\t\tIt indicates that more results are available to fulfill the request and further calls are needed. If\n\t\t\t\tmaxResults is returned, it is possible the number of results is less than\n\t\t\t\tmaxResults.

" + "smithy.api#documentation": "

The nextToken value that's returned from a\n\t\t\t\tListServicesByNamespace request. It indicates that more results are\n\t\t\tavailable to fulfill the request and further calls are needed. If\n\t\t\t\tmaxResults is returned, it is possible the number of results is less\n\t\t\tthan maxResults.

" } }, "maxResults": { "target": "com.amazonaws.ecs#BoxedInteger", "traits": { - "smithy.api#documentation": "

The maximum number of service results that ListServicesByNamespace returns in paginated\n\t\t\toutput. When this parameter is used, ListServicesByNamespace only returns\n\t\t\t\tmaxResults results in a single page along with a nextToken response\n\t\t\telement. The remaining results of the initial request can be seen by sending another\n\t\t\t\tListServicesByNamespace request with the returned nextToken value. This\n\t\t\tvalue can be between 1 and 100. If this parameter\n\t\t\tisn't used, then ListServicesByNamespace returns up to\n\t\t\t10 results and a nextToken value if\n\t\t\tapplicable.

" + "smithy.api#documentation": "

The maximum number of service results that ListServicesByNamespace\n\t\t\treturns in paginated output. When this parameter is used,\n\t\t\t\tListServicesByNamespace only returns maxResults results in\n\t\t\ta single page along with a nextToken response element. The remaining\n\t\t\tresults of the initial request can be seen by sending another\n\t\t\t\tListServicesByNamespace request with the returned\n\t\t\t\tnextToken value. This value can be between 1 and\n\t\t\t100. If this parameter isn't used, then\n\t\t\t\tListServicesByNamespace returns up to\n\t\t\t10 results and a nextToken\n\t\t\tvalue if applicable.

" } } }, @@ -7588,7 +7588,7 @@ "nextToken": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

The nextToken value to include in a future ListServicesByNamespace request.\n\t\t\tWhen the results of a ListServicesByNamespace request exceed maxResults, this\n\t\t\tvalue can be used to retrieve the next page of results. When there are no more results to return, this\n\t\t\tvalue is null.

" + "smithy.api#documentation": "

The nextToken value to include in a future\n\t\t\t\tListServicesByNamespace request. When the results of a\n\t\t\t\tListServicesByNamespace request exceed maxResults, this\n\t\t\tvalue can be used to retrieve the next page of results. When there are no more results\n\t\t\tto return, this value is null.

" } } }, @@ -7602,19 +7602,19 @@ "cluster": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

The short name or full Amazon Resource Name (ARN) of the cluster to use when filtering the ListServices\n\t\t\tresults. If you do not specify a cluster, the default cluster is assumed.

" + "smithy.api#documentation": "

The short name or full Amazon Resource Name (ARN) of the cluster to use when filtering the\n\t\t\t\tListServices results. If you do not specify a cluster, the default cluster is assumed.

" } }, "nextToken": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

The nextToken value returned from a ListServices request indicating that\n\t\t\tmore results are available to fulfill the request and further calls will be needed. If\n\t\t\t\tmaxResults was provided, it is possible the number of results to be fewer than\n\t\t\t\tmaxResults.

\n \n

This token should be treated as an opaque identifier that is only used to\n retrieve the next items in a list and not for other programmatic purposes.

\n
" + "smithy.api#documentation": "

The nextToken value returned from a ListServices request\n\t\t\tindicating that more results are available to fulfill the request and further calls will\n\t\t\tbe needed. If maxResults was provided, it is possible the number of results\n\t\t\tto be fewer than maxResults.

\n \n

This token should be treated as an opaque identifier that is only used to\n retrieve the next items in a list and not for other programmatic purposes.

\n
" } }, "maxResults": { "target": "com.amazonaws.ecs#BoxedInteger", "traits": { - "smithy.api#documentation": "

The maximum number of service results that ListServices returned in paginated output.\n\t\t\tWhen this parameter is used, ListServices only returns maxResults results in\n\t\t\ta single page along with a nextToken response element. The remaining results of the\n\t\t\tinitial request can be seen by sending another ListServices request with the returned\n\t\t\t\tnextToken value. This value can be between 1 and\n\t\t\t100. If this parameter isn't used, then ListServices returns up to\n\t\t\t10 results and a nextToken value if applicable.

" + "smithy.api#documentation": "

The maximum number of service results that ListServices returned in\n\t\t\tpaginated output. When this parameter is used, ListServices only returns\n\t\t\t\tmaxResults results in a single page along with a nextToken\n\t\t\tresponse element. The remaining results of the initial request can be seen by sending\n\t\t\tanother ListServices request with the returned nextToken\n\t\t\tvalue. This value can be between 1 and 100. If\n\t\t\tthis parameter isn't used, then ListServices returns up to\n\t\t\t10 results and a nextToken value if\n\t\t\tapplicable.

" } }, "launchType": { @@ -7626,7 +7626,7 @@ "schedulingStrategy": { "target": "com.amazonaws.ecs#SchedulingStrategy", "traits": { - "smithy.api#documentation": "

The scheduling strategy to use when filtering the ListServices results.

" + "smithy.api#documentation": "

The scheduling strategy to use when filtering the ListServices\n\t\t\tresults.

" } } }, @@ -7640,13 +7640,13 @@ "serviceArns": { "target": "com.amazonaws.ecs#StringList", "traits": { - "smithy.api#documentation": "

The list of full ARN entries for each service that's associated with the specified cluster.

" + "smithy.api#documentation": "

The list of full ARN entries for each service that's associated with the specified\n\t\t\tcluster.

" } }, "nextToken": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

The nextToken value to include in a future ListServices request. When the\n\t\t\tresults of a ListServices request exceed maxResults, this value can be used\n\t\t\tto retrieve the next page of results. This value is null when there are no more results to\n\t\t\treturn.

" + "smithy.api#documentation": "

The nextToken value to include in a future ListServices\n\t\t\trequest. When the results of a ListServices request exceed\n\t\t\t\tmaxResults, this value can be used to retrieve the next page of\n\t\t\tresults. This value is null when there are no more results to\n\t\t\treturn.

" } } }, @@ -7703,7 +7703,7 @@ "resourceArn": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

The Amazon Resource Name (ARN) that identifies the resource to list the tags for. Currently, the supported resources\n\t\t\tare Amazon ECS tasks, services, task definitions, clusters, and container instances.

", + "smithy.api#documentation": "

The Amazon Resource Name (ARN) that identifies the resource to list the tags for. Currently, the\n\t\t\tsupported resources are Amazon ECS tasks, services, task definitions, clusters, and container\n\t\t\tinstances.

", "smithy.api#required": {} } } @@ -7746,7 +7746,7 @@ } ], "traits": { - "smithy.api#documentation": "

Returns a list of task definition families that are registered to your account. This list includes\n\t\t\ttask definition families that no longer have any ACTIVE task definition revisions.

\n

You can filter out task definition families that don't contain any ACTIVE task\n\t\t\tdefinition revisions by setting the status parameter to ACTIVE. You can also\n\t\t\tfilter the results with the familyPrefix parameter.

", + "smithy.api#documentation": "

Returns a list of task definition families that are registered to your account. This\n\t\t\tlist includes task definition families that no longer have any ACTIVE task\n\t\t\tdefinition revisions.

\n

You can filter out task definition families that don't contain any ACTIVE\n\t\t\ttask definition revisions by setting the status parameter to\n\t\t\t\tACTIVE. You can also filter the results with the\n\t\t\t\tfamilyPrefix parameter.

", "smithy.api#examples": [ { "title": "To filter your registered task definition families", @@ -7788,25 +7788,25 @@ "familyPrefix": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

The familyPrefix is a string that's used to filter the results of\n\t\t\t\tListTaskDefinitionFamilies. If you specify a familyPrefix, only task\n\t\t\tdefinition family names that begin with the familyPrefix string are returned.

" + "smithy.api#documentation": "

The familyPrefix is a string that's used to filter the results of\n\t\t\t\tListTaskDefinitionFamilies. If you specify a familyPrefix,\n\t\t\tonly task definition family names that begin with the familyPrefix string\n\t\t\tare returned.

" } }, "status": { "target": "com.amazonaws.ecs#TaskDefinitionFamilyStatus", "traits": { - "smithy.api#documentation": "

The task definition family status to filter the ListTaskDefinitionFamilies results with.\n\t\t\tBy default, both ACTIVE and INACTIVE task definition families are listed. If\n\t\t\tthis parameter is set to ACTIVE, only task definition families that have an\n\t\t\t\tACTIVE task definition revision are returned. If this parameter is set to\n\t\t\t\tINACTIVE, only task definition families that do not have any ACTIVE task\n\t\t\tdefinition revisions are returned. If you paginate the resulting output, be sure to keep the\n\t\t\t\tstatus value constant in each subsequent request.

" + "smithy.api#documentation": "

The task definition family status to filter the\n\t\t\t\tListTaskDefinitionFamilies results with. By default, both\n\t\t\t\tACTIVE and INACTIVE task definition families are listed.\n\t\t\tIf this parameter is set to ACTIVE, only task definition families that have\n\t\t\tan ACTIVE task definition revision are returned. If this parameter is set\n\t\t\tto INACTIVE, only task definition families that do not have any\n\t\t\t\tACTIVE task definition revisions are returned. If you paginate the\n\t\t\tresulting output, be sure to keep the status value constant in each\n\t\t\tsubsequent request.

" } }, "nextToken": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

The nextToken value returned from a ListTaskDefinitionFamilies request\n\t\t\tindicating that more results are available to fulfill the request and further calls will be needed. If\n\t\t\t\tmaxResults was provided, it is possible the number of results to be fewer than\n\t\t\t\tmaxResults.

\n \n

This token should be treated as an opaque identifier that is only used to\n retrieve the next items in a list and not for other programmatic purposes.

\n
" + "smithy.api#documentation": "

The nextToken value returned from a\n\t\t\t\tListTaskDefinitionFamilies request indicating that more results are\n\t\t\tavailable to fulfill the request and further calls will be needed. If\n\t\t\t\tmaxResults was provided, it is possible the number of results to be\n\t\t\tfewer than maxResults.

\n \n

This token should be treated as an opaque identifier that is only used to\n retrieve the next items in a list and not for other programmatic purposes.

\n
" } }, "maxResults": { "target": "com.amazonaws.ecs#BoxedInteger", "traits": { - "smithy.api#documentation": "

The maximum number of task definition family results that ListTaskDefinitionFamilies\n\t\t\treturned in paginated output. When this parameter is used, ListTaskDefinitions only\n\t\t\treturns maxResults results in a single page along with a nextToken response\n\t\t\telement. The remaining results of the initial request can be seen by sending another\n\t\t\t\tListTaskDefinitionFamilies request with the returned nextToken value.\n\t\t\tThis value can be between 1 and 100. If this parameter isn't used, then\n\t\t\t\tListTaskDefinitionFamilies returns up to 100 results and a\n\t\t\t\tnextToken value if applicable.

" + "smithy.api#documentation": "

The maximum number of task definition family results that\n\t\t\t\tListTaskDefinitionFamilies returned in paginated output. When this\n\t\t\tparameter is used, ListTaskDefinitions only returns maxResults\n\t\t\tresults in a single page along with a nextToken response element. The\n\t\t\tremaining results of the initial request can be seen by sending another\n\t\t\t\tListTaskDefinitionFamilies request with the returned\n\t\t\t\tnextToken value. This value can be between 1 and\n\t\t\t100. If this parameter isn't used, then\n\t\t\t\tListTaskDefinitionFamilies returns up to 100 results\n\t\t\tand a nextToken value if applicable.

" } } }, @@ -7820,13 +7820,13 @@ "families": { "target": "com.amazonaws.ecs#StringList", "traits": { - "smithy.api#documentation": "

The list of task definition family names that match the ListTaskDefinitionFamilies\n\t\t\trequest.

" + "smithy.api#documentation": "

The list of task definition family names that match the\n\t\t\t\tListTaskDefinitionFamilies request.

" } }, "nextToken": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

The nextToken value to include in a future ListTaskDefinitionFamilies\n\t\t\trequest. When the results of a ListTaskDefinitionFamilies request exceed\n\t\t\t\tmaxResults, this value can be used to retrieve the next page of results. This value is\n\t\t\t\tnull when there are no more results to return.

" + "smithy.api#documentation": "

The nextToken value to include in a future\n\t\t\t\tListTaskDefinitionFamilies request. When the results of a\n\t\t\t\tListTaskDefinitionFamilies request exceed maxResults, this\n\t\t\tvalue can be used to retrieve the next page of results. This value is null\n\t\t\twhen there are no more results to return.

" } } }, @@ -7854,7 +7854,7 @@ } ], "traits": { - "smithy.api#documentation": "

Returns a list of task definitions that are registered to your account. You can filter the results by\n\t\t\tfamily name with the familyPrefix parameter or by status with the status\n\t\t\tparameter.

", + "smithy.api#documentation": "

Returns a list of task definitions that are registered to your account. You can filter\n\t\t\tthe results by family name with the familyPrefix parameter or by status\n\t\t\twith the status parameter.

", "smithy.api#examples": [ { "title": "To list the registered task definitions in a family", @@ -7900,31 +7900,31 @@ "familyPrefix": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

The full family name to filter the ListTaskDefinitions results with. Specifying a\n\t\t\t\tfamilyPrefix limits the listed task definitions to task definition revisions that\n\t\t\tbelong to that family.

" + "smithy.api#documentation": "

The full family name to filter the ListTaskDefinitions results with.\n\t\t\tSpecifying a familyPrefix limits the listed task definitions to task\n\t\t\tdefinition revisions that belong to that family.

" } }, "status": { "target": "com.amazonaws.ecs#TaskDefinitionStatus", "traits": { - "smithy.api#documentation": "

The task definition status to filter the ListTaskDefinitions results with. By default,\n\t\t\tonly ACTIVE task definitions are listed. By setting this parameter to\n\t\t\t\tINACTIVE, you can view task definitions that are INACTIVE as long as an\n\t\t\tactive task or service still references them. If you paginate the resulting output, be sure to keep the\n\t\t\t\tstatus value constant in each subsequent request.

" + "smithy.api#documentation": "

The task definition status to filter the ListTaskDefinitions results\n\t\t\twith. By default, only ACTIVE task definitions are listed. By setting this\n\t\t\tparameter to INACTIVE, you can view task definitions that are\n\t\t\t\tINACTIVE as long as an active task or service still references them. If\n\t\t\tyou paginate the resulting output, be sure to keep the status value\n\t\t\tconstant in each subsequent request.

" } }, "sort": { "target": "com.amazonaws.ecs#SortOrder", "traits": { - "smithy.api#documentation": "

The order to sort the results in. Valid values are ASC and DESC. By\n\t\t\tdefault, (ASC) task definitions are listed lexicographically by family name and in\n\t\t\tascending numerical order by revision so that the newest task definitions in a family are listed last.\n\t\t\tSetting this parameter to DESC reverses the sort order on family name and revision. This\n\t\t\tis so that the newest task definitions in a family are listed first.

" + "smithy.api#documentation": "

The order to sort the results in. Valid values are ASC and\n\t\t\t\tDESC. By default, (ASC) task definitions are listed\n\t\t\tlexicographically by family name and in ascending numerical order by revision so that\n\t\t\tthe newest task definitions in a family are listed last. Setting this parameter to\n\t\t\t\tDESC reverses the sort order on family name and revision. This is so\n\t\t\tthat the newest task definitions in a family are listed first.

" } }, "nextToken": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

The nextToken value returned from a ListTaskDefinitions request indicating\n\t\t\tthat more results are available to fulfill the request and further calls will be needed. If\n\t\t\t\tmaxResults was provided, it is possible the number of results to be fewer than\n\t\t\t\tmaxResults.

\n \n

This token should be treated as an opaque identifier that is only used to\n retrieve the next items in a list and not for other programmatic purposes.

\n
" + "smithy.api#documentation": "

The nextToken value returned from a ListTaskDefinitions\n\t\t\trequest indicating that more results are available to fulfill the request and further\n\t\t\tcalls will be needed. If maxResults was provided, it is possible the number\n\t\t\tof results to be fewer than maxResults.

\n \n

This token should be treated as an opaque identifier that is only used to\n retrieve the next items in a list and not for other programmatic purposes.

\n
" } }, "maxResults": { "target": "com.amazonaws.ecs#BoxedInteger", "traits": { - "smithy.api#documentation": "

The maximum number of task definition results that ListTaskDefinitions returned in\n\t\t\tpaginated output. When this parameter is used, ListTaskDefinitions only returns\n\t\t\t\tmaxResults results in a single page along with a nextToken response\n\t\t\telement. The remaining results of the initial request can be seen by sending another\n\t\t\t\tListTaskDefinitions request with the returned nextToken value. This value\n\t\t\tcan be between 1 and 100. If this parameter isn't used, then\n\t\t\t\tListTaskDefinitions returns up to 100 results and a\n\t\t\t\tnextToken value if applicable.

" + "smithy.api#documentation": "

The maximum number of task definition results that ListTaskDefinitions\n\t\t\treturned in paginated output. When this parameter is used,\n\t\t\t\tListTaskDefinitions only returns maxResults results in a\n\t\t\tsingle page along with a nextToken response element. The remaining results\n\t\t\tof the initial request can be seen by sending another ListTaskDefinitions\n\t\t\trequest with the returned nextToken value. This value can be between\n\t\t\t1 and 100. If this parameter isn't used, then\n\t\t\t\tListTaskDefinitions returns up to 100 results and a\n\t\t\t\tnextToken value if applicable.

" } } }, @@ -7944,7 +7944,7 @@ "nextToken": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

The nextToken value to include in a future ListTaskDefinitions request.\n\t\t\tWhen the results of a ListTaskDefinitions request exceed maxResults, this\n\t\t\tvalue can be used to retrieve the next page of results. This value is null when there are\n\t\t\tno more results to return.

" + "smithy.api#documentation": "

The nextToken value to include in a future\n\t\t\t\tListTaskDefinitions request. When the results of a\n\t\t\t\tListTaskDefinitions request exceed maxResults, this value\n\t\t\tcan be used to retrieve the next page of results. This value is null when\n\t\t\tthere are no more results to return.

" } } }, @@ -7978,7 +7978,7 @@ } ], "traits": { - "smithy.api#documentation": "

Returns a list of tasks. You can filter the results by cluster, task definition family, container\n\t\t\tinstance, launch type, what IAM principal started the task, or by the desired status of the\n\t\t\ttask.

\n

Recently stopped tasks might appear in the returned results.

", + "smithy.api#documentation": "

Returns a list of tasks. You can filter the results by cluster, task definition\n\t\t\tfamily, container instance, launch type, what IAM principal started the task, or by\n\t\t\tthe desired status of the task.

\n

Recently stopped tasks might appear in the returned results.

", "smithy.api#examples": [ { "title": "To list the tasks on a particular container instance", @@ -8021,49 +8021,49 @@ "cluster": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

The short name or full Amazon Resource Name (ARN) of the cluster to use when filtering the ListTasks\n\t\t\tresults. If you do not specify a cluster, the default cluster is assumed.

" + "smithy.api#documentation": "

The short name or full Amazon Resource Name (ARN) of the cluster to use when filtering the\n\t\t\t\tListTasks results. If you do not specify a cluster, the default cluster is assumed.

" } }, "containerInstance": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

The container instance ID or full ARN of the container instance to use when filtering the\n\t\t\t\tListTasks results. Specifying a containerInstance limits the results to\n\t\t\ttasks that belong to that container instance.

" + "smithy.api#documentation": "

The container instance ID or full ARN of the container instance to use when\n\t\t\tfiltering the ListTasks results. Specifying a\n\t\t\t\tcontainerInstance limits the results to tasks that belong to that\n\t\t\tcontainer instance.

" } }, "family": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

The name of the task definition family to use when filtering the ListTasks results.\n\t\t\tSpecifying a family limits the results to tasks that belong to that family.

" + "smithy.api#documentation": "

The name of the task definition family to use when filtering the\n\t\t\t\tListTasks results. Specifying a family limits the results\n\t\t\tto tasks that belong to that family.

" } }, "nextToken": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

The nextToken value returned from a ListTasks request indicating that more\n\t\t\tresults are available to fulfill the request and further calls will be needed. If\n\t\t\t\tmaxResults was provided, it's possible the number of results to be fewer than\n\t\t\t\tmaxResults.

\n \n

This token should be treated as an opaque identifier that is only used to\n retrieve the next items in a list and not for other programmatic purposes.

\n
" + "smithy.api#documentation": "

The nextToken value returned from a ListTasks request\n\t\t\tindicating that more results are available to fulfill the request and further calls will\n\t\t\tbe needed. If maxResults was provided, it's possible the number of results\n\t\t\tto be fewer than maxResults.

\n \n

This token should be treated as an opaque identifier that is only used to\n retrieve the next items in a list and not for other programmatic purposes.

\n
" } }, "maxResults": { "target": "com.amazonaws.ecs#BoxedInteger", "traits": { - "smithy.api#documentation": "

The maximum number of task results that ListTasks returned in paginated output. When\n\t\t\tthis parameter is used, ListTasks only returns maxResults results in a single\n\t\t\tpage along with a nextToken response element. The remaining results of the initial request\n\t\t\tcan be seen by sending another ListTasks request with the returned nextToken\n\t\t\tvalue. This value can be between 1 and 100. If this parameter isn't used,\n\t\t\tthen ListTasks returns up to 100 results and a nextToken\n\t\t\tvalue if applicable.

" + "smithy.api#documentation": "

The maximum number of task results that ListTasks returned in paginated\n\t\t\toutput. When this parameter is used, ListTasks only returns\n\t\t\t\tmaxResults results in a single page along with a nextToken\n\t\t\tresponse element. The remaining results of the initial request can be seen by sending\n\t\t\tanother ListTasks request with the returned nextToken value.\n\t\t\tThis value can be between 1 and 100. If this parameter\n\t\t\tisn't used, then ListTasks returns up to 100 results and\n\t\t\ta nextToken value if applicable.

" } }, "startedBy": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

The startedBy value to filter the task results with. Specifying a startedBy\n\t\t\tvalue limits the results to tasks that were started with that value.

\n

When you specify startedBy as the filter, it must be the only filter that you\n\t\t\tuse.

" + "smithy.api#documentation": "

The startedBy value to filter the task results with. Specifying a\n\t\t\t\tstartedBy value limits the results to tasks that were started with that\n\t\t\tvalue.

\n

When you specify startedBy as the filter, it must be the only filter that\n\t\t\tyou use.

" } }, "serviceName": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

The name of the service to use when filtering the ListTasks results. Specifying a\n\t\t\t\tserviceName limits the results to tasks that belong to that service.

" + "smithy.api#documentation": "

The name of the service to use when filtering the ListTasks results.\n\t\t\tSpecifying a serviceName limits the results to tasks that belong to that\n\t\t\tservice.

" } }, "desiredStatus": { "target": "com.amazonaws.ecs#DesiredStatus", "traits": { - "smithy.api#documentation": "

The task desired status to use when filtering the ListTasks results. Specifying a\n\t\t\t\tdesiredStatus of STOPPED limits the results to tasks that Amazon ECS has set\n\t\t\tthe desired status to STOPPED. This can be useful for debugging tasks that aren't starting\n\t\t\tproperly or have died or finished. The default status filter is RUNNING, which shows tasks\n\t\t\tthat Amazon ECS has set the desired status to RUNNING.

\n \n

Although you can filter results based on a desired status of PENDING, this doesn't\n\t\t\t\treturn any results. Amazon ECS never sets the desired status of a task to that value (only a task's\n\t\t\t\t\tlastStatus may have a value of PENDING).

\n
" + "smithy.api#documentation": "

The task desired status to use when filtering the ListTasks results.\n\t\t\tSpecifying a desiredStatus of STOPPED limits the results to\n\t\t\ttasks that Amazon ECS has set the desired status to STOPPED. This can be useful\n\t\t\tfor debugging tasks that aren't starting properly or have died or finished. The default\n\t\t\tstatus filter is RUNNING, which shows tasks that Amazon ECS has set the desired\n\t\t\tstatus to RUNNING.

\n \n

Although you can filter results based on a desired status of PENDING,\n\t\t\t\tthis doesn't return any results. Amazon ECS never sets the desired status of a task to\n\t\t\t\tthat value (only a task's lastStatus may have a value of\n\t\t\t\t\tPENDING).

\n
" } }, "launchType": { @@ -8089,7 +8089,7 @@ "nextToken": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

The nextToken value to include in a future ListTasks request. When the\n\t\t\tresults of a ListTasks request exceed maxResults, this value can be used to\n\t\t\tretrieve the next page of results. This value is null when there are no more results to\n\t\t\treturn.

" + "smithy.api#documentation": "

The nextToken value to include in a future ListTasks\n\t\t\trequest. When the results of a ListTasks request exceed\n\t\t\t\tmaxResults, this value can be used to retrieve the next page of\n\t\t\tresults. This value is null when there are no more results to\n\t\t\treturn.

" } } }, @@ -8103,30 +8103,30 @@ "targetGroupArn": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

The full Amazon Resource Name (ARN) of the Elastic Load Balancing target group or groups associated with a service or task set.

\n

A target group ARN is only specified when using an Application Load Balancer or Network Load Balancer.

\n

For services using the ECS deployment controller, you can specify one or multiple target\n\t\t\tgroups. For more information, see Registering multiple target groups with a service in the Amazon Elastic Container Service Developer Guide.

\n

For services using the CODE_DEPLOY deployment controller, you're required to define two\n\t\t\ttarget groups for the load balancer. For more information, see Blue/green deployment with CodeDeploy in the Amazon Elastic Container Service Developer Guide.

\n \n

If your service's task definition uses the awsvpc network mode, you must choose\n\t\t\t\t\tip as the target type, not instance. Do this when creating your\n\t\t\t\ttarget groups because tasks that use the awsvpc network mode are associated with an\n\t\t\t\telastic network interface, not an Amazon EC2 instance. This network mode is required for the\n\t\t\t\tFargate launch type.

\n
" + "smithy.api#documentation": "

The full Amazon Resource Name (ARN) of the Elastic Load Balancing target group or groups associated with a service or\n\t\t\ttask set.

\n

A target group ARN is only specified when using an Application Load Balancer or Network Load Balancer.

\n

For services using the ECS deployment controller, you can specify one or\n\t\t\tmultiple target groups. For more information, see Registering multiple target groups with a service in\n\t\t\tthe Amazon Elastic Container Service Developer Guide.

\n

For services using the CODE_DEPLOY deployment controller, you're required\n\t\t\tto define two target groups for the load balancer. For more information, see Blue/green deployment with CodeDeploy in the\n\t\t\tAmazon Elastic Container Service Developer Guide.

\n \n

If your service's task definition uses the awsvpc network mode, you\n\t\t\t\tmust choose ip as the target type, not instance. Do this\n\t\t\t\twhen creating your target groups because tasks that use the awsvpc\n\t\t\t\tnetwork mode are associated with an elastic network interface, not an Amazon EC2\n\t\t\t\tinstance. This network mode is required for the Fargate launch\n\t\t\t\ttype.

\n
" } }, "loadBalancerName": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

The name of the load balancer to associate with the Amazon ECS service or task set.

\n

If you are using an Application Load Balancer or a Network Load Balancer the load balancer name parameter should be omitted.

" + "smithy.api#documentation": "

The name of the load balancer to associate with the Amazon ECS service or task set.

\n

If you are using an Application Load Balancer or a Network Load Balancer the load balancer name parameter should be\n\t\t\tomitted.

" } }, "containerName": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

The name of the container (as it appears in a container definition) to associate with the load\n\t\t\tbalancer.

\n

You need to specify the container name when configuring the target group for an Amazon ECS load\n\t\t\tbalancer.

" + "smithy.api#documentation": "

The name of the container (as it appears in a container definition) to associate with\n\t\t\tthe load balancer.

\n

You need to specify the container name when configuring the target group for an Amazon ECS\n\t\t\tload balancer.

" } }, "containerPort": { "target": "com.amazonaws.ecs#BoxedInteger", "traits": { - "smithy.api#documentation": "

The port on the container to associate with the load balancer. This port must correspond to a\n\t\t\t\tcontainerPort in the task definition the tasks in the service are using. For tasks\n\t\t\tthat use the EC2 launch type, the container instance they're launched on must allow\n\t\t\tingress traffic on the hostPort of the port mapping.

" + "smithy.api#documentation": "

The port on the container to associate with the load balancer. This port must\n\t\t\tcorrespond to a containerPort in the task definition the tasks in the\n\t\t\tservice are using. For tasks that use the EC2 launch type, the container\n\t\t\tinstance they're launched on must allow ingress traffic on the hostPort of\n\t\t\tthe port mapping.

" } } }, "traits": { - "smithy.api#documentation": "

The load balancer configuration to use with a service or task set.

\n

When you add, update, or remove a load balancer configuration, Amazon ECS starts a new deployment with the\n\t\t\tupdated Elastic Load Balancing configuration. This causes tasks to register to and deregister from load\n\t\t\tbalancers.

\n

We recommend that you verify this on a test environment before you update the Elastic Load Balancing configuration.

\n

A service-linked role is required for services that use multiple target groups. For more information,\n\t\t\tsee Using service-linked roles in the Amazon Elastic Container Service Developer Guide.

" + "smithy.api#documentation": "

The load balancer configuration to use with a service or task set.

\n

When you add, update, or remove a load balancer configuration, Amazon ECS starts a new\n\t\t\tdeployment with the updated Elastic Load Balancing configuration. This causes tasks to register to and\n\t\t\tderegister from load balancers.

\n

We recommend that you verify this on a test environment before you update the Elastic Load Balancing\n\t\t\tconfiguration.

\n

A service-linked role is required for services that use multiple target groups. For\n\t\t\tmore information, see Using\n\t\t\t\tservice-linked roles in the Amazon Elastic Container Service Developer Guide.

" } }, "com.amazonaws.ecs#LoadBalancers": { @@ -8141,25 +8141,25 @@ "logDriver": { "target": "com.amazonaws.ecs#LogDriver", "traits": { - "smithy.api#documentation": "

The log driver to use for the container.

\n

For tasks on Fargate, the supported log drivers are awslogs, splunk, and\n\t\t\t\tawsfirelens.

\n

For tasks hosted on Amazon EC2 instances, the supported log drivers are awslogs,\n\t\t\t\tfluentd, gelf, json-file, journald,\n\t\t\t\tsyslog, splunk, and awsfirelens.

\n

For more information about using the awslogs log driver, see Send Amazon ECS logs to CloudWatch in the\n\t\t\tAmazon Elastic Container Service Developer Guide.

\n

For more information about using the awsfirelens log driver, see Send Amazon ECS logs to\n\t\t\t\tan Amazon Web Services service or Amazon Web Services Partner.

\n \n

If you have a custom driver that isn't listed, you can fork the Amazon ECS container agent project\n\t\t\t\tthat's available on GitHub and\n\t\t\t\tcustomize it to work with that driver. We encourage you to submit pull requests for changes that\n\t\t\t\tyou would like to have included. However, we don't currently provide support for running modified\n\t\t\t\tcopies of this software.

\n
", + "smithy.api#documentation": "

The log driver to use for the container.

\n

For tasks on Fargate, the supported log drivers are awslogs,\n\t\t\t\tsplunk, and awsfirelens.

\n

For tasks hosted on Amazon EC2 instances, the supported log drivers are\n\t\t\t\tawslogs, fluentd, gelf,\n\t\t\t\tjson-file, journald, syslog,\n\t\t\t\tsplunk, and awsfirelens.

\n

For more information about using the awslogs log driver, see Send\n\t\t\t\tAmazon ECS logs to CloudWatch in the Amazon Elastic Container Service Developer Guide.

\n

For more information about using the awsfirelens log driver, see Send\n\t\t\t\tAmazon ECS logs to an Amazon Web Services service or Amazon Web Services Partner.

\n \n

If you have a custom driver that isn't listed, you can fork the Amazon ECS container\n\t\t\t\tagent project that's available\n\t\t\t\t\ton GitHub and customize it to work with that driver. We encourage you to\n\t\t\t\tsubmit pull requests for changes that you would like to have included. However, we\n\t\t\t\tdon't currently provide support for running modified copies of this software.

\n
", "smithy.api#required": {} } }, "options": { "target": "com.amazonaws.ecs#LogConfigurationOptionsMap", "traits": { - "smithy.api#documentation": "

The configuration options to send to the log driver.

\n

The options you can specify depend on the log driver. Some of the options you can specify when you\n\t\t\tuse the awslogs log driver to route logs to Amazon CloudWatch include the following:

\n
\n
awslogs-create-group
\n
\n

Required: No

\n

Specify whether you want the log group to be created automatically. If this option isn't\n\t\t\t\t\t\tspecified, it defaults to false.

\n \n

Your IAM policy must include the logs:CreateLogGroup permission before\n\t\t\t\t\t\t\tyou attempt to use awslogs-create-group.

\n
\n
\n
awslogs-region
\n
\n

Required: Yes

\n

Specify the Amazon Web Services Region that the awslogs log driver is to send your Docker\n\t\t\t\t\t\tlogs to. You can choose to send all of your logs from clusters in different Regions to a\n\t\t\t\t\t\tsingle region in CloudWatch Logs. This is so that they're all visible in one location. Otherwise, you\n\t\t\t\t\t\tcan separate them by Region for more granularity. Make sure that the specified log group\n\t\t\t\t\t\texists in the Region that you specify with this option.

\n
\n
awslogs-group
\n
\n

Required: Yes

\n

Make sure to specify a log group that the awslogs log driver sends its log\n\t\t\t\t\t\tstreams to.

\n
\n
awslogs-stream-prefix
\n
\n

Required: Yes, when using the Fargate launch\n\t\t\t\t\t\t\ttype.Optional for the EC2 launch type, required for the\n\t\t\t\t\t\t\tFargate launch type.

\n

Use the awslogs-stream-prefix option to associate a log stream with the\n\t\t\t\t\t\tspecified prefix, the container name, and the ID of the Amazon ECS task that the container\n\t\t\t\t\t\tbelongs to. If you specify a prefix with this option, then the log stream takes the format\n\t\t\t\t\t\t\tprefix-name/container-name/ecs-task-id.

\n

If you don't specify a prefix with this option, then the log stream is named after the\n\t\t\t\t\t\tcontainer ID that's assigned by the Docker daemon on the container instance. Because it's\n\t\t\t\t\t\tdifficult to trace logs back to the container that sent them with just the Docker container\n\t\t\t\t\t\tID (which is only available on the container instance), we recommend that you specify a\n\t\t\t\t\t\tprefix with this option.

\n

For Amazon ECS services, you can use the service name as the prefix. Doing so, you can trace\n\t\t\t\t\t\tlog streams to the service that the container belongs to, the name of the container that\n\t\t\t\t\t\tsent them, and the ID of the task that the container belongs to.

\n

You must specify a stream-prefix for your logs to have your logs appear in the Log pane\n\t\t\t\t\t\twhen using the Amazon ECS console.

\n
\n
awslogs-datetime-format
\n
\n

Required: No

\n

This option defines a multiline start pattern in Python strftime format. A\n\t\t\t\t\t\tlog message consists of a line that matches the pattern and any following lines that don’t\n\t\t\t\t\t\tmatch the pattern. The matched line is the delimiter between log messages.

\n

One example of a use case for using this format is for parsing output such as a stack\n\t\t\t\t\t\tdump, which might otherwise be logged in multiple entries. The correct pattern allows it to\n\t\t\t\t\t\tbe captured in a single entry.

\n

For more information, see awslogs-datetime-format.

\n

You cannot configure both the awslogs-datetime-format and\n\t\t\t\t\t\t\tawslogs-multiline-pattern options.

\n \n

Multiline logging performs regular expression parsing and matching of all log\n\t\t\t\t\t\t\tmessages. This might have a negative impact on logging performance.

\n
\n
\n
awslogs-multiline-pattern
\n
\n

Required: No

\n

This option defines a multiline start pattern that uses a regular expression. A log\n\t\t\t\t\t\tmessage consists of a line that matches the pattern and any following lines that don’t\n\t\t\t\t\t\tmatch the pattern. The matched line is the delimiter between log messages.

\n

For more information, see awslogs-multiline-pattern.

\n

This option is ignored if awslogs-datetime-format is also configured.

\n

You cannot configure both the awslogs-datetime-format and\n\t\t\t\t\t\t\tawslogs-multiline-pattern options.

\n \n

Multiline logging performs regular expression parsing and matching of all log\n\t\t\t\t\t\t\tmessages. This might have a negative impact on logging performance.

\n
\n
\n
mode
\n
\n

Required: No

\n

Valid values: non-blocking | blocking\n

\n

This option defines the delivery mode of log messages from the container to CloudWatch Logs. The\n\t\t\t\t\t\tdelivery mode you choose affects application availability when the flow of logs from\n\t\t\t\t\t\tcontainer to CloudWatch is interrupted.

\n

If you use the blocking mode and the flow of logs to CloudWatch is interrupted,\n\t\t\t\t\t\tcalls from container code to write to the stdout and stderr\n\t\t\t\t\t\tstreams will block. The logging thread of the application will block as a result. This may\n\t\t\t\t\t\tcause the application to become unresponsive and lead to container healthcheck failure.

\n

If you use the non-blocking mode, the container's logs are instead stored in\n\t\t\t\t\t\tan in-memory intermediate buffer configured with the max-buffer-size option.\n\t\t\t\t\t\tThis prevents the application from becoming unresponsive when logs cannot be sent to CloudWatch.\n\t\t\t\t\t\tWe recommend using this mode if you want to ensure service availability and are okay with\n\t\t\t\t\t\tsome log loss. For more information, see Preventing log loss with non-blocking mode in the awslogs container log\n\t\t\t\t\t\t\tdriver.

\n
\n
max-buffer-size
\n
\n

Required: No

\n

Default value: 1m\n

\n

When non-blocking mode is used, the max-buffer-size log option\n\t\t\t\t\t\tcontrols the size of the buffer that's used for intermediate message storage. Make sure to\n\t\t\t\t\t\tspecify an adequate buffer size based on your application. When the buffer fills up,\n\t\t\t\t\t\tfurther logs cannot be stored. Logs that cannot be stored are lost.

\n
\n
\n

To route logs using the splunk log router, you need to specify a\n\t\t\t\tsplunk-token and a splunk-url.

\n

When you use the awsfirelens log router to route logs to an Amazon Web Services Service or Amazon Web Services Partner Network\n\t\t\tdestination for log storage and analytics, you can set the log-driver-buffer-limit option\n\t\t\tto limit the number of events that are buffered in memory, before being sent to the log router\n\t\t\tcontainer. It can help to resolve potential log loss issue because high throughput might result in\n\t\t\tmemory running out for the buffer inside of Docker.

\n

Other options you can specify when using awsfirelens to route logs depend on the\n\t\t\tdestination. When you export logs to Amazon Data Firehose, you can specify the Amazon Web Services Region with\n\t\t\t\tregion and a name for the log stream with delivery_stream.

\n

When you export logs to Amazon Kinesis Data Streams, you can specify an Amazon Web Services Region with region and a\n\t\t\tdata stream name with stream.

\n

When you export logs to Amazon OpenSearch Service, you can specify options like Name, Host\n\t\t\t(OpenSearch Service endpoint without protocol), Port, Index, Type,\n\t\t\t\tAws_auth, Aws_region, Suppress_Type_Name, and\n\t\t\t\ttls.

\n

When you export logs to Amazon S3, you can specify the bucket using the bucket option. You\n\t\t\tcan also specify region, total_file_size, upload_timeout, and\n\t\t\t\tuse_put_object as options.

\n

This parameter requires version 1.19 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: sudo docker version --format '{{.Server.APIVersion}}'\n

" + "smithy.api#documentation": "

The configuration options to send to the log driver.

\n

The options you can specify depend on the log driver. Some of the options you can\n\t\t\tspecify when you use the awslogs log driver to route logs to Amazon CloudWatch\n\t\t\tinclude the following:

\n
\n
awslogs-create-group
\n
\n

Required: No

\n

Specify whether you want the log group to be created automatically. If\n\t\t\t\t\t\tthis option isn't specified, it defaults to false.

\n \n

Your IAM policy must include the logs:CreateLogGroup\n\t\t\t\t\t\t\tpermission before you attempt to use\n\t\t\t\t\t\t\tawslogs-create-group.

\n
\n
\n
awslogs-region
\n
\n

Required: Yes

\n

Specify the Amazon Web Services Region that the awslogs log driver is to\n\t\t\t\t\t\tsend your Docker logs to. You can choose to send all of your logs from\n\t\t\t\t\t\tclusters in different Regions to a single region in CloudWatch Logs. This is so that\n\t\t\t\t\t\tthey're all visible in one location. Otherwise, you can separate them by\n\t\t\t\t\t\tRegion for more granularity. Make sure that the specified log group exists\n\t\t\t\t\t\tin the Region that you specify with this option.

\n
\n
awslogs-group
\n
\n

Required: Yes

\n

Make sure to specify a log group that the awslogs log driver\n\t\t\t\t\t\tsends its log streams to.

\n
\n
awslogs-stream-prefix
\n
\n

Required: Yes, when using the Fargate launch\n\t\t\t\t\t\t\ttype. Optional for the EC2 launch type,\n\t\t\t\t\t\t\trequired for the Fargate launch type.

\n

Use the awslogs-stream-prefix option to associate a log\n\t\t\t\t\t\tstream with the specified prefix, the container name, and the ID of the\n\t\t\t\t\t\tAmazon ECS task that the container belongs to. If you specify a prefix with this\n\t\t\t\t\t\toption, then the log stream takes the format\n\t\t\t\t\t\t\tprefix-name/container-name/ecs-task-id.

\n

If you don't specify a prefix with this option, then the log stream is\n\t\t\t\t\t\tnamed after the container ID that's assigned by the Docker daemon on the\n\t\t\t\t\t\tcontainer instance. Because it's difficult to trace logs back to the\n\t\t\t\t\t\tcontainer that sent them with just the Docker container ID (which is only\n\t\t\t\t\t\tavailable on the container instance), we recommend that you specify a prefix\n\t\t\t\t\t\twith this option.

\n

For Amazon ECS services, you can use the service name as the prefix. Doing so,\n\t\t\t\t\t\tyou can trace log streams to the service that the container belongs to, the\n\t\t\t\t\t\tname of the container that sent them, and the ID of the task that the\n\t\t\t\t\t\tcontainer belongs to.

\n

You must specify a stream-prefix for your logs to have your logs appear in\n\t\t\t\t\t\tthe Log pane when using the Amazon ECS console.

\n
\n
awslogs-datetime-format
\n
\n

Required: No

\n

This option defines a multiline start pattern in Python\n\t\t\t\t\t\t\tstrftime format. A log message consists of a line that\n\t\t\t\t\t\tmatches the pattern and any following lines that don’t match the pattern.\n\t\t\t\t\t\tThe matched line is the delimiter between log messages.

\n

One example of a use case for using this format is for parsing output such\n\t\t\t\t\t\tas a stack dump, which might otherwise be logged in multiple entries. The\n\t\t\t\t\t\tcorrect pattern allows it to be captured in a single entry.

\n

For more information, see awslogs-datetime-format.

\n

You cannot configure both the awslogs-datetime-format and\n\t\t\t\t\t\t\tawslogs-multiline-pattern options.

\n \n

Multiline logging performs regular expression parsing and matching of\n\t\t\t\t\t\t\tall log messages. This might have a negative impact on logging\n\t\t\t\t\t\t\tperformance.

\n
\n
\n
awslogs-multiline-pattern
\n
\n

Required: No

\n

This option defines a multiline start pattern that uses a regular\n\t\t\t\t\t\texpression. A log message consists of a line that matches the pattern and\n\t\t\t\t\t\tany following lines that don’t match the pattern. The matched line is the\n\t\t\t\t\t\tdelimiter between log messages.

\n

For more information, see awslogs-multiline-pattern.

\n

This option is ignored if awslogs-datetime-format is also\n\t\t\t\t\t\tconfigured.

\n

You cannot configure both the awslogs-datetime-format and\n\t\t\t\t\t\t\tawslogs-multiline-pattern options.

\n \n

Multiline logging performs regular expression parsing and matching of\n\t\t\t\t\t\t\tall log messages. This might have a negative impact on logging\n\t\t\t\t\t\t\tperformance.

\n
\n
\n
mode
\n
\n

Required: No

\n

Valid values: non-blocking | blocking\n

\n

This option defines the delivery mode of log messages from the container\n\t\t\t\t\t\tto CloudWatch Logs. The delivery mode you choose affects application availability when\n\t\t\t\t\t\tthe flow of logs from container to CloudWatch is interrupted.

\n

If you use the blocking mode and the flow of logs to CloudWatch is\n\t\t\t\t\t\tinterrupted, calls from container code to write to the stdout\n\t\t\t\t\t\tand stderr streams will block. The logging thread of the\n\t\t\t\t\t\tapplication will block as a result. This may cause the application to become\n\t\t\t\t\t\tunresponsive and lead to container healthcheck failure.

\n

If you use the non-blocking mode, the container's logs are\n\t\t\t\t\t\tinstead stored in an in-memory intermediate buffer configured with the\n\t\t\t\t\t\t\tmax-buffer-size option. This prevents the application from\n\t\t\t\t\t\tbecoming unresponsive when logs cannot be sent to CloudWatch. We recommend using\n\t\t\t\t\t\tthis mode if you want to ensure service availability and are okay with some\n\t\t\t\t\t\tlog loss. For more information, see Preventing log loss with non-blocking mode in the awslogs\n\t\t\t\t\t\t\tcontainer log driver.

\n
\n
max-buffer-size
\n
\n

Required: No

\n

Default value: 1m\n

\n

When non-blocking mode is used, the\n\t\t\t\t\t\t\tmax-buffer-size log option controls the size of the buffer\n\t\t\t\t\t\tthat's used for intermediate message storage. Make sure to specify an\n\t\t\t\t\t\tadequate buffer size based on your application. When the buffer fills up,\n\t\t\t\t\t\tfurther logs cannot be stored. Logs that cannot be stored are lost.

\n
\n
\n

To route logs using the splunk log router, you need to specify a\n\t\t\t\tsplunk-token and a splunk-url.

\n

When you use the awsfirelens log router to route logs to an Amazon Web Services Service\n\t\t\tor Amazon Web Services Partner Network destination for log storage and analytics, you can set the\n\t\t\t\tlog-driver-buffer-limit option to limit the number of events that are\n\t\t\tbuffered in memory, before being sent to the log router container. It can help to\n\t\t\tresolve potential log loss issues because high throughput might result in memory running\n\t\t\tout for the buffer inside of Docker.

\n

Other options you can specify when using awsfirelens to route logs depend\n\t\t\ton the destination. When you export logs to Amazon Data Firehose, you can specify the Amazon Web Services Region\n\t\t\twith region and a name for the log stream with\n\t\t\tdelivery_stream.

\n

When you export logs to Amazon Kinesis Data Streams, you can specify an Amazon Web Services Region with\n\t\t\t\tregion and a data stream name with stream.

\n

When you export logs to Amazon OpenSearch Service, you can specify options like Name,\n\t\t\t\tHost (OpenSearch Service endpoint without protocol), Port,\n\t\t\t\tIndex, Type, Aws_auth,\n\t\t\t\tAws_region, Suppress_Type_Name, and\n\t\t\ttls. For more information, see Under the hood: FireLens for Amazon ECS Tasks.

\n

When you export logs to Amazon S3, you can specify the bucket using the bucket\n\t\t\toption. You can also specify region, total_file_size,\n\t\t\t\tupload_timeout, and use_put_object as options.

\n

This parameter requires version 1.19 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: sudo docker version --format '{{.Server.APIVersion}}'\n

" } }, "secretOptions": { "target": "com.amazonaws.ecs#SecretList", "traits": { - "smithy.api#documentation": "

The secrets to pass to the log configuration. For more information, see Specifying sensitive\n\t\t\t\tdata in the Amazon Elastic Container Service Developer Guide.

" + "smithy.api#documentation": "

The secrets to pass to the log configuration. For more information, see Specifying\n\t\t\t\tsensitive data in the Amazon Elastic Container Service Developer Guide.

" } } }, "traits": { - "smithy.api#documentation": "

The log configuration for the container. This parameter maps to LogConfig in the docker\n\t\t\tcontainer create command and the --log-driver option to docker run.

\n

By default, containers use the same logging driver that the Docker daemon uses. However, the\n\t\t\tcontainer might use a different logging driver than the Docker daemon by specifying a log driver\n\t\t\tconfiguration in the container definition.

\n

Understand the following when specifying a log configuration for your containers.

\n
    \n
  • \n

    Amazon ECS currently supports a subset of the logging drivers available to the Docker daemon.\n\t\t\t\t\tAdditional log drivers may be available in future releases of the Amazon ECS container agent.

    \n

    For tasks on Fargate, the supported log drivers are awslogs,\n\t\t\t\t\t\tsplunk, and awsfirelens.

    \n

    For tasks hosted on Amazon EC2 instances, the supported log drivers are awslogs,\n\t\t\t\t\t\tfluentd, gelf, json-file,\n\t\t\t\t\t\tjournald,syslog, splunk, and\n\t\t\t\t\t\tawsfirelens.

    \n
  • \n
  • \n

    This parameter requires version 1.18 of the Docker Remote API or greater on your container\n\t\t\t\t\tinstance.

    \n
  • \n
  • \n

    For tasks that are hosted on Amazon EC2 instances, the Amazon ECS container agent must register the\n\t\t\t\t\tavailable logging drivers with the ECS_AVAILABLE_LOGGING_DRIVERS environment\n\t\t\t\t\tvariable before containers placed on that instance can use these log configuration options. For\n\t\t\t\t\tmore information, see Amazon ECS container agent configuration in the\n\t\t\t\t\tAmazon Elastic Container Service Developer Guide.

    \n
  • \n
  • \n

    For tasks that are on Fargate, because you don't have access to the underlying\n\t\t\t\t\tinfrastructure your tasks are hosted on, any additional software needed must be installed\n\t\t\t\t\toutside of the task. For example, the Fluentd output aggregators or a remote host running\n\t\t\t\t\tLogstash to send Gelf logs to.

    \n
  • \n
" + "smithy.api#documentation": "

The log configuration for the container. This parameter maps to LogConfig\n\t\t\tin the docker container create command and the --log-driver option to\n\t\t\tdocker run.

\n

By default, containers use the same logging driver that the Docker daemon uses.\n\t\t\tHowever, the container might use a different logging driver than the Docker daemon by\n\t\t\tspecifying a log driver configuration in the container definition.

\n

Understand the following when specifying a log configuration for your\n\t\t\tcontainers.

\n
    \n
  • \n

    Amazon ECS currently supports a subset of the logging drivers available to the\n\t\t\t\t\tDocker daemon. Additional log drivers may be available in future releases of the\n\t\t\t\t\tAmazon ECS container agent.

    \n

    For tasks on Fargate, the supported log drivers are awslogs,\n\t\t\t\t\t\tsplunk, and awsfirelens.

    \n

    For tasks hosted on Amazon EC2 instances, the supported log drivers are\n\t\t\t\t\t\tawslogs, fluentd, gelf,\n\t\t\t\t\t\tjson-file, journald, syslog,\n\t\t\t\t\t\tsplunk, and awsfirelens.

    \n
  • \n
  • \n

    This parameter requires version 1.18 of the Docker Remote API or greater on\n\t\t\t\t\tyour container instance.

    \n
  • \n
  • \n

    For tasks that are hosted on Amazon EC2 instances, the Amazon ECS container agent must\n\t\t\t\t\tregister the available logging drivers with the\n\t\t\t\t\t\tECS_AVAILABLE_LOGGING_DRIVERS environment variable before\n\t\t\t\t\tcontainers placed on that instance can use these log configuration options. For\n\t\t\t\t\tmore information, see Amazon ECS container agent configuration in the\n\t\t\t\t\tAmazon Elastic Container Service Developer Guide.

    \n
  • \n
  • \n

    For tasks that are on Fargate, because you don't have access to the\n\t\t\t\t\tunderlying infrastructure your tasks are hosted on, any additional software\n\t\t\t\t\tneeded must be installed outside of the task. For example, the Fluentd output\n\t\t\t\t\taggregators or a remote host running Logstash to send Gelf logs to.

    \n
  • \n
" } }, "com.amazonaws.ecs#LogConfigurationOptionsMap": { @@ -8242,7 +8242,7 @@ "name": { "target": "com.amazonaws.ecs#ManagedAgentName", "traits": { - "smithy.api#documentation": "

The name of the managed agent. When the execute command feature is turned on, the managed agent name\n\t\t\tis ExecuteCommandAgent.

" + "smithy.api#documentation": "

The name of the managed agent. When the execute command feature is turned on, the\n\t\t\tmanaged agent name is ExecuteCommandAgent.

" } }, "reason": { @@ -8349,30 +8349,30 @@ "targetCapacity": { "target": "com.amazonaws.ecs#ManagedScalingTargetCapacity", "traits": { - "smithy.api#documentation": "

The target capacity utilization as a percentage for the capacity provider. The specified value must\n\t\t\tbe greater than 0 and less than or equal to 100. For example, if you want the\n\t\t\tcapacity provider to maintain 10% spare capacity, then that means the utilization is 90%, so use a\n\t\t\t\ttargetCapacity of 90. The default value of 100 percent\n\t\t\tresults in the Amazon EC2 instances in your Auto Scaling group being completely used.

" + "smithy.api#documentation": "

The target capacity utilization as a percentage for the capacity provider. The\n\t\t\tspecified value must be greater than 0 and less than or equal to\n\t\t\t\t100. For example, if you want the capacity provider to maintain 10%\n\t\t\tspare capacity, then that means the utilization is 90%, so use a\n\t\t\t\ttargetCapacity of 90. The default value of\n\t\t\t\t100 percent results in the Amazon EC2 instances in your Auto Scaling group\n\t\t\tbeing completely used.

" } }, "minimumScalingStepSize": { "target": "com.amazonaws.ecs#ManagedScalingStepSize", "traits": { - "smithy.api#documentation": "

The minimum number of Amazon EC2 instances that Amazon ECS will scale out at one time. The scale in process is\n\t\t\tnot affected by this parameter If this parameter is omitted, the default value of 1 is\n\t\t\tused.

\n

When additional capacity is required, Amazon ECS will scale up the minimum scaling step size even if the\n\t\t\tactual demand is less than the minimum scaling step size.

\n

If you use a capacity provider with an Auto Scaling group configured with more than one Amazon EC2\n\t\t\tinstance type or Availability Zone, Amazon ECS will scale up by the exact minimum scaling step size value\n\t\t\tand will ignore both the maximum scaling step size as well as the capacity demand.

" + "smithy.api#documentation": "

The minimum number of Amazon EC2 instances that Amazon ECS will scale out at one time. The scale\n\t\t\tin process is not affected by this parameter. If this parameter is omitted, the default\n\t\t\tvalue of 1 is used.

\n

When additional capacity is required, Amazon ECS will scale up the minimum scaling step\n\t\t\tsize even if the actual demand is less than the minimum scaling step size.

\n

If you use a capacity provider with an Auto Scaling group configured with more than\n\t\t\tone Amazon EC2 instance type or Availability Zone, Amazon ECS will scale up by the exact minimum\n\t\t\tscaling step size value and will ignore both the maximum scaling step size as well as\n\t\t\tthe capacity demand.

" } }, "maximumScalingStepSize": { "target": "com.amazonaws.ecs#ManagedScalingStepSize", "traits": { - "smithy.api#documentation": "

The maximum number of Amazon EC2 instances that Amazon ECS will scale out at one time. If this parameter is\n\t\t\tomitted, the default value of 10000 is used.

" + "smithy.api#documentation": "

The maximum number of Amazon EC2 instances that Amazon ECS will scale out at one time. If this\n\t\t\tparameter is omitted, the default value of 10000 is used.

" } }, "instanceWarmupPeriod": { "target": "com.amazonaws.ecs#ManagedScalingInstanceWarmupPeriod", "traits": { - "smithy.api#documentation": "

The period of time, in seconds, after a newly launched Amazon EC2 instance can contribute to CloudWatch metrics\n\t\t\tfor Auto Scaling group. If this parameter is omitted, the default value of 300 seconds is\n\t\t\tused.

" + "smithy.api#documentation": "

The period of time, in seconds, after a newly launched Amazon EC2 instance can contribute\n\t\t\tto CloudWatch metrics for Auto Scaling group. If this parameter is omitted, the default value\n\t\t\tof 300 seconds is used.

" } } }, "traits": { - "smithy.api#documentation": "

The managed scaling settings for the Auto Scaling group capacity provider.

\n

When managed scaling is turned on, Amazon ECS manages the scale-in and scale-out actions of the Auto\n\t\t\tScaling group. Amazon ECS manages a target tracking scaling policy using an Amazon ECS managed CloudWatch metric with\n\t\t\tthe specified targetCapacity value as the target value for the metric. For more\n\t\t\tinformation, see Using managed scaling in the Amazon Elastic Container Service Developer Guide.

\n

If managed scaling is off, the user must manage the scaling of the Auto Scaling group.

" + "smithy.api#documentation": "

The managed scaling settings for the Auto Scaling group capacity provider.

\n

When managed scaling is turned on, Amazon ECS manages the scale-in and scale-out actions of\n\t\t\tthe Auto Scaling group. Amazon ECS manages a target tracking scaling policy using an Amazon ECS\n\t\t\tmanaged CloudWatch metric with the specified targetCapacity value as the target\n\t\t\tvalue for the metric. For more information, see Using managed scaling in the Amazon Elastic Container Service Developer Guide.

\n

If managed scaling is off, the user must manage the scaling of the Auto Scaling\n\t\t\tgroup.

" } }, "com.amazonaws.ecs#ManagedScalingInstanceWarmupPeriod": { @@ -8467,7 +8467,7 @@ } }, "traits": { - "smithy.api#documentation": "

Amazon ECS can't determine the current version of the Amazon ECS container agent on the container instance and\n\t\t\tdoesn't have enough information to proceed with an update. This could be because the agent running on\n\t\t\tthe container instance is a previous or custom version that doesn't use our version information.

", + "smithy.api#documentation": "

Amazon ECS can't determine the current version of the Amazon ECS container agent on the\n\t\t\tcontainer instance and doesn't have enough information to proceed with an update. This\n\t\t\tcould be because the agent running on the container instance is a previous or custom\n\t\t\tversion that doesn't use our version information.

", "smithy.api#error": "client" } }, @@ -8477,7 +8477,7 @@ "sourceVolume": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

The name of the volume to mount. Must be a volume name referenced in the name parameter\n\t\t\tof task definition volume.

" + "smithy.api#documentation": "

The name of the volume to mount. Must be a volume name referenced in the\n\t\t\t\tname parameter of task definition volume.

" } }, "containerPath": { @@ -8489,7 +8489,7 @@ "readOnly": { "target": "com.amazonaws.ecs#BoxedBoolean", "traits": { - "smithy.api#documentation": "

If this value is true, the container has read-only access to the volume. If this value\n\t\t\tis false, then the container can write to the volume. The default value is\n\t\t\t\tfalse.

" + "smithy.api#documentation": "

If this value is true, the container has read-only access to the volume.\n\t\t\tIf this value is false, then the container can write to the volume. The\n\t\t\tdefault value is false.

" } } }, @@ -8548,18 +8548,18 @@ "containerPortRange": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

The port number range on the container that's bound to the dynamically mapped host port range.

\n

The following rules apply when you specify a containerPortRange:

\n
    \n
  • \n

    You must use either the bridge network mode or the awsvpc\n\t\t\t\t\tnetwork mode.

    \n
  • \n
  • \n

    This parameter is available for both the EC2 and Fargate launch types.

    \n
  • \n
  • \n

    This parameter is available for both the Linux and Windows operating systems.

    \n
  • \n
  • \n

    The container instance must have at least version 1.67.0 of the container agent\n\t\t\t\t\tand at least version 1.67.0-1 of the ecs-init package

    \n
  • \n
  • \n

    You can specify a maximum of 100 port ranges per container.

    \n
  • \n
  • \n

    You do not specify a hostPortRange. The value of the hostPortRange is set\n\t\t\t\t\tas follows:

    \n
      \n
    • \n

      For containers in a task with the awsvpc network mode,\n\t\t\t\t\t\t\tthe hostPortRange is set to the same value as the\n\t\t\t\t\t\t\t\tcontainerPortRange. This is a static mapping\n\t\t\t\t\t\t\tstrategy.

      \n
    • \n
    • \n

      For containers in a task with the bridge network mode, the Amazon ECS agent finds open host ports from the default ephemeral range and passes it to docker to bind them to the container ports.

      \n
    • \n
    \n
  • \n
  • \n

    The containerPortRange valid values are between 1 and\n\t\t\t\t\t65535.

    \n
  • \n
  • \n

    A port can only be included in one port mapping per container.

    \n
  • \n
  • \n

    You cannot specify overlapping port ranges.

    \n
  • \n
  • \n

    The first port in the range must be less than last port in the range.

    \n
  • \n
  • \n

    Docker recommends that you turn off the docker-proxy in the Docker daemon config file when you have a large number of ports.

    \n

    For more information, see Issue #11185 on the Github website.

    \n

    For information about how to turn off the docker-proxy in the Docker daemon config file, see Docker daemon in the Amazon ECS Developer Guide.

    \n
  • \n
\n

You can call \n DescribeTasks\n to view the hostPortRange which\n\t\t\tare the host ports that are bound to the container ports.

" + "smithy.api#documentation": "

The port number range on the container that's bound to the dynamically mapped host\n\t\t\tport range.

\n

The following rules apply when you specify a containerPortRange:

\n
    \n
  • \n

    You must use either the bridge network mode or the awsvpc\n\t\t\t\t\tnetwork mode.

    \n
  • \n
  • \n

    This parameter is available for both the EC2 and Fargate launch types.

    \n
  • \n
  • \n

    This parameter is available for both the Linux and Windows operating systems.

    \n
  • \n
  • \n

    The container instance must have at least version 1.67.0 of the container agent\n\t\t\t\t\tand at least version 1.67.0-1 of the ecs-init package

    \n
  • \n
  • \n

    You can specify a maximum of 100 port ranges per container.

    \n
  • \n
  • \n

    You do not specify a hostPortRange. The value of the hostPortRange is set\n\t\t\t\t\tas follows:

    \n
      \n
    • \n

      For containers in a task with the awsvpc network mode,\n\t\t\t\t\t\t\tthe hostPortRange is set to the same value as the\n\t\t\t\t\t\t\t\tcontainerPortRange. This is a static mapping\n\t\t\t\t\t\t\tstrategy.

      \n
    • \n
    • \n

      For containers in a task with the bridge network mode, the Amazon ECS agent finds open host ports from the default ephemeral range and passes it to docker to bind them to the container ports.

      \n
    • \n
    \n
  • \n
  • \n

    The containerPortRange valid values are between 1 and\n\t\t\t\t\t65535.

    \n
  • \n
  • \n

    A port can only be included in one port mapping per container.

    \n
  • \n
  • \n

    You cannot specify overlapping port ranges.

    \n
  • \n
  • \n

    The first port in the range must be less than last port in the range.

    \n
  • \n
  • \n

    Docker recommends that you turn off the docker-proxy in the Docker daemon config file when you have a large number of ports.

    \n

    For more information, see Issue #11185 on the Github website.

    \n

    For information about how to turn off the docker-proxy in the Docker daemon config file, see Docker daemon in the Amazon ECS Developer Guide.

    \n
  • \n
\n

You can call \n DescribeTasks\n to view the hostPortRange which\n\t\t\tare the host ports that are bound to the container ports.

" } }, "hostPortRange": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

The port number range on the host that's used with the network binding. This is assigned is assigned\n\t\t\tby Docker and delivered by the Amazon ECS agent.

" + "smithy.api#documentation": "

The port number range on the host that's used with the network binding. This is\n\t\t\tassigned by Docker and delivered by the Amazon ECS agent.

" } } }, "traits": { - "smithy.api#documentation": "

Details on the network bindings between a container and its host container instance. After a task\n\t\t\treaches the RUNNING status, manual and automatic host and container port assignments are\n\t\t\tvisible in the networkBindings section of DescribeTasks API\n\t\t\tresponses.

" + "smithy.api#documentation": "

Details on the network bindings between a container and its host container instance.\n\t\t\tAfter a task reaches the RUNNING status, manual and automatic host and\n\t\t\tcontainer port assignments are visible in the networkBindings section of\n\t\t\t\tDescribeTasks API\n\t\t\tresponses.

" } }, "com.amazonaws.ecs#NetworkBindings": { @@ -8605,7 +8605,7 @@ } }, "traits": { - "smithy.api#documentation": "

An object representing the elastic network interface for tasks that use the awsvpc\n\t\t\tnetwork mode.

" + "smithy.api#documentation": "

An object representing the elastic network interface for tasks that use the\n\t\t\t\tawsvpc network mode.

" } }, "com.amazonaws.ecs#NetworkInterfaces": { @@ -8654,7 +8654,7 @@ } }, "traits": { - "smithy.api#documentation": "

There's no update available for this Amazon ECS container agent. This might be because the agent is\n\t\t\talready running the latest version or because it's so old that there's no update path to the current\n\t\t\tversion.

", + "smithy.api#documentation": "

There's no update available for this Amazon ECS container agent. This might be because the\n\t\t\tagent is already running the latest version or because it's so old that there's no\n\t\t\tupdate path to the current version.

", "smithy.api#error": "client" } }, @@ -8734,18 +8734,18 @@ "type": { "target": "com.amazonaws.ecs#PlacementConstraintType", "traits": { - "smithy.api#documentation": "

The type of constraint. Use distinctInstance to ensure that each task in a particular\n\t\t\tgroup is running on a different container instance. Use memberOf to restrict the selection\n\t\t\tto a group of valid candidates.

" + "smithy.api#documentation": "

The type of constraint. Use distinctInstance to ensure that each task in\n\t\t\ta particular group is running on a different container instance. Use\n\t\t\t\tmemberOf to restrict the selection to a group of valid\n\t\t\tcandidates.

" } }, "expression": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

A cluster query language expression to apply to the constraint. The expression can have a maximum\n\t\t\tlength of 2000 characters. You can't specify an expression if the constraint type is\n\t\t\t\tdistinctInstance. For more information, see Cluster\n\t\t\t\tquery language in the Amazon Elastic Container Service Developer Guide.

" + "smithy.api#documentation": "

A cluster query language expression to apply to the constraint. The expression can\n\t\t\thave a maximum length of 2000 characters. You can't specify an expression if the\n\t\t\tconstraint type is distinctInstance. For more information, see Cluster query language in the Amazon Elastic Container Service Developer Guide.

" } } }, "traits": { - "smithy.api#documentation": "

An object representing a constraint on task placement. For more information, see Task\n\t\t\t\tplacement constraints in the Amazon Elastic Container Service Developer Guide.

\n \n

If you're using the Fargate launch type, task placement constraints aren't\n\t\t\t\tsupported.

\n
" + "smithy.api#documentation": "

An object representing a constraint on task placement. For more information, see\n\t\t\t\tTask placement constraints in the\n\t\t\tAmazon Elastic Container Service Developer Guide.

\n \n

If you're using the Fargate launch type, task placement constraints\n\t\t\t\taren't supported.

\n
" } }, "com.amazonaws.ecs#PlacementConstraintType": { @@ -8783,18 +8783,18 @@ "type": { "target": "com.amazonaws.ecs#PlacementStrategyType", "traits": { - "smithy.api#documentation": "

The type of placement strategy. The random placement strategy randomly places tasks on\n\t\t\tavailable candidates. The spread placement strategy spreads placement across available\n\t\t\tcandidates evenly based on the field parameter. The binpack strategy places\n\t\t\ttasks on available candidates that have the least available amount of the resource that's specified\n\t\t\twith the field parameter. For example, if you binpack on memory, a task is placed on the\n\t\t\tinstance with the least amount of remaining memory but still enough to run the task.

" + "smithy.api#documentation": "

The type of placement strategy. The random placement strategy randomly\n\t\t\tplaces tasks on available candidates. The spread placement strategy spreads\n\t\t\tplacement across available candidates evenly based on the field parameter.\n\t\t\tThe binpack strategy places tasks on available candidates that have the\n\t\t\tleast available amount of the resource that's specified with the field\n\t\t\tparameter. For example, if you binpack on memory, a task is placed on the instance with\n\t\t\tthe least amount of remaining memory but still enough to run the task.

" } }, "field": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

The field to apply the placement strategy against. For the spread placement strategy,\n\t\t\tvalid values are instanceId (or host, which has the same effect), or any\n\t\t\tplatform or custom attribute that's applied to a container instance, such as\n\t\t\t\tattribute:ecs.availability-zone. For the binpack placement strategy,\n\t\t\tvalid values are cpu and memory. For the random placement\n\t\t\tstrategy, this field is not used.

" + "smithy.api#documentation": "

The field to apply the placement strategy against. For the spread\n\t\t\tplacement strategy, valid values are instanceId (or host,\n\t\t\twhich has the same effect), or any platform or custom attribute that's applied to a\n\t\t\tcontainer instance, such as attribute:ecs.availability-zone. For the\n\t\t\t\tbinpack placement strategy, valid values are cpu and\n\t\t\t\tmemory. For the random placement strategy, this field is\n\t\t\tnot used.

" } } }, "traits": { - "smithy.api#documentation": "

The task placement strategy for a task or service. For more information, see Task\n\t\t\t\tplacement strategies in the Amazon Elastic Container Service Developer Guide.

" + "smithy.api#documentation": "

The task placement strategy for a task or service. For more information, see Task placement strategies in the\n\t\t\tAmazon Elastic Container Service Developer Guide.

" } }, "com.amazonaws.ecs#PlacementStrategyType": { @@ -8826,20 +8826,20 @@ "id": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

The ID for the GPUs on the container instance. The available GPU IDs can also be obtained on the\n\t\t\tcontainer instance in the /var/lib/ecs/gpu/nvidia_gpu_info.json file.

", + "smithy.api#documentation": "

The ID for the GPUs on the container instance. The available GPU IDs can also be\n\t\t\tobtained on the container instance in the\n\t\t\t\t/var/lib/ecs/gpu/nvidia_gpu_info.json file.

", "smithy.api#required": {} } }, "type": { "target": "com.amazonaws.ecs#PlatformDeviceType", "traits": { - "smithy.api#documentation": "

The type of device that's available on the container instance. The only supported value is\n\t\t\t\tGPU.

", + "smithy.api#documentation": "

The type of device that's available on the container instance. The only supported\n\t\t\tvalue is GPU.

", "smithy.api#required": {} } } }, "traits": { - "smithy.api#documentation": "

The devices that are available on the container instance. The only supported device type is a\n\t\t\tGPU.

" + "smithy.api#documentation": "

The devices that are available on the container instance. The only supported device\n\t\t\ttype is a GPU.

" } }, "com.amazonaws.ecs#PlatformDeviceType": { @@ -8895,42 +8895,42 @@ "containerPort": { "target": "com.amazonaws.ecs#BoxedInteger", "traits": { - "smithy.api#documentation": "

The port number on the container that's bound to the user-specified or automatically assigned host\n\t\t\tport.

\n

If you use containers in a task with the awsvpc or host network mode,\n\t\t\tspecify the exposed ports using containerPort.

\n

If you use containers in a task with the bridge network mode and you specify a container\n\t\t\tport and not a host port, your container automatically receives a host port in the ephemeral port\n\t\t\trange. For more information, see hostPort. Port mappings that are automatically assigned\n\t\t\tin this way do not count toward the 100 reserved ports limit of a container instance.

" + "smithy.api#documentation": "

The port number on the container that's bound to the user-specified or automatically\n\t\t\tassigned host port.

\n

If you use containers in a task with the awsvpc or host\n\t\t\tnetwork mode, specify the exposed ports using containerPort.

\n

If you use containers in a task with the bridge network mode and you\n\t\t\tspecify a container port and not a host port, your container automatically receives a\n\t\t\thost port in the ephemeral port range. For more information, see hostPort.\n\t\t\tPort mappings that are automatically assigned in this way do not count toward the 100\n\t\t\treserved ports limit of a container instance.

" } }, "hostPort": { "target": "com.amazonaws.ecs#BoxedInteger", "traits": { - "smithy.api#documentation": "

The port number on the container instance to reserve for your container.

\n

If you specify a containerPortRange, leave this field empty and the value of the\n\t\t\t\thostPort is set as follows:

\n
    \n
  • \n

    For containers in a task with the awsvpc network mode, the hostPort\n\t\t\t\t\tis set to the same value as the containerPort. This is a static mapping\n\t\t\t\t\tstrategy.

    \n
  • \n
  • \n

    For containers in a task with the bridge network mode, the Amazon ECS agent finds\n\t\t\t\t\topen ports on the host and automatically binds them to the container ports. This is a dynamic\n\t\t\t\t\tmapping strategy.

    \n
  • \n
\n

If you use containers in a task with the awsvpc or host network mode, the\n\t\t\t\thostPort can either be left blank or set to the same value as the\n\t\t\t\tcontainerPort.

\n

If you use containers in a task with the bridge network mode, you can specify a\n\t\t\tnon-reserved host port for your container port mapping, or you can omit the hostPort (or\n\t\t\tset it to 0) while specifying a containerPort and your container\n\t\t\tautomatically receives a port in the ephemeral port range for your container instance operating system\n\t\t\tand Docker version.

\n

The default ephemeral port range for Docker version 1.6.0 and later is listed on the instance under\n\t\t\t\t/proc/sys/net/ipv4/ip_local_port_range. If this kernel parameter is unavailable, the\n\t\t\tdefault ephemeral port range from 49153 through 65535 (Linux) or 49152 through 65535 (Windows) is used.\n\t\t\tDo not attempt to specify a host port in the ephemeral port range as these are reserved for automatic\n\t\t\tassignment. In general, ports below 32768 are outside of the ephemeral port range.

\n

The default reserved ports are 22 for SSH, the Docker ports 2375 and 2376, and the Amazon ECS container\n\t\t\tagent ports 51678-51680. Any host port that was previously specified in a running task is also reserved\n\t\t\twhile the task is running. That is, after a task stops, the host port is released. The current reserved\n\t\t\tports are displayed in the remainingResources of DescribeContainerInstances output. A container instance can have up to 100 reserved ports\n\t\t\tat a time. This number includes the default reserved ports. Automatically assigned ports aren't\n\t\t\tincluded in the 100 reserved ports quota.

" + "smithy.api#documentation": "

The port number on the container instance to reserve for your container.

\n

If you specify a containerPortRange, leave this field empty and the value\n\t\t\tof the hostPort is set as follows:

\n
    \n
  • \n

    For containers in a task with the awsvpc network mode, the\n\t\t\t\t\t\thostPort is set to the same value as the\n\t\t\t\t\t\tcontainerPort. This is a static mapping strategy.

    \n
  • \n
  • \n

    For containers in a task with the bridge network mode, the Amazon ECS\n\t\t\t\t\tagent finds open ports on the host and automatically binds them to the container\n\t\t\t\t\tports. This is a dynamic mapping strategy.

    \n
  • \n
\n

If you use containers in a task with the awsvpc or host\n\t\t\tnetwork mode, the hostPort can either be left blank or set to the same\n\t\t\tvalue as the containerPort.

\n

If you use containers in a task with the bridge network mode, you can\n\t\t\tspecify a non-reserved host port for your container port mapping, or you can omit the\n\t\t\t\thostPort (or set it to 0) while specifying a\n\t\t\t\tcontainerPort and your container automatically receives a port in the\n\t\t\tephemeral port range for your container instance operating system and Docker\n\t\t\tversion.

\n

The default ephemeral port range for Docker version 1.6.0 and later is listed on the\n\t\t\tinstance under /proc/sys/net/ipv4/ip_local_port_range. If this kernel\n\t\t\tparameter is unavailable, the default ephemeral port range from 49153 through 65535\n\t\t\t(Linux) or 49152 through 65535 (Windows) is used. Do not attempt to specify a host port\n\t\t\tin the ephemeral port range as these are reserved for automatic assignment. In general,\n\t\t\tports below 32768 are outside of the ephemeral port range.

\n

The default reserved ports are 22 for SSH, the Docker ports 2375 and 2376, and the\n\t\t\tAmazon ECS container agent ports 51678-51680. Any host port that was previously specified in\n\t\t\ta running task is also reserved while the task is running. That is, after a task stops,\n\t\t\tthe host port is released. The current reserved ports are displayed in the\n\t\t\t\tremainingResources of DescribeContainerInstances output. A container instance can have up to 100\n\t\t\treserved ports at a time. This number includes the default reserved ports. Automatically\n\t\t\tassigned ports aren't included in the 100 reserved ports quota.

" } }, "protocol": { "target": "com.amazonaws.ecs#TransportProtocol", "traits": { - "smithy.api#documentation": "

The protocol used for the port mapping. Valid values are tcp and udp. The\n\t\t\tdefault is tcp. protocol is immutable in a Service Connect service. Updating\n\t\t\tthis field requires a service deletion and redeployment.

" + "smithy.api#documentation": "

The protocol used for the port mapping. Valid values are tcp and\n\t\t\t\tudp. The default is tcp. protocol is\n\t\t\timmutable in a Service Connect service. Updating this field requires a service deletion\n\t\t\tand redeployment.

" } }, "name": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

The name that's used for the port mapping. This parameter is the name that you use in the \n\t\t\tserviceConnectConfiguration and the vpcLatticeConfigurations of a service. \n\t\t\tThe name can include up to 64 characters. The characters can include lowercase letters, numbers, \n\t\t\tunderscores (_), and hyphens (-). The name can't start with a hyphen.

" + "smithy.api#documentation": "

The name that's used for the port mapping. This parameter is the name that you use in\n\t\t\tthe serviceConnectConfiguration and the\n\t\t\t\tvpcLatticeConfigurations of a service. The name can include up to 64\n\t\t\tcharacters. The characters can include lowercase letters, numbers, underscores (_), and\n\t\t\thyphens (-). The name can't start with a hyphen.

" } }, "appProtocol": { "target": "com.amazonaws.ecs#ApplicationProtocol", "traits": { - "smithy.api#documentation": "

The application protocol that's used for the port mapping. This parameter only applies to\n\t\t\tService Connect. We recommend that you set this parameter to be consistent with the protocol that your\n\t\t\tapplication uses. If you set this parameter, Amazon ECS adds protocol-specific connection handling to the\n\t\t\tService Connect proxy. If you set this parameter, Amazon ECS adds protocol-specific telemetry in the Amazon ECS\n\t\t\tconsole and CloudWatch.

\n

If you don't set a value for this parameter, then TCP is used. However, Amazon ECS doesn't add\n\t\t\tprotocol-specific telemetry for TCP.

\n

\n appProtocol is immutable in a Service Connect service. Updating this field requires a\n\t\t\tservice deletion and redeployment.

\n

Tasks that run in a namespace can use short names to connect\n\tto services in the namespace. Tasks can connect to services across all of the clusters in the namespace.\n\tTasks connect through a managed proxy container\n\tthat collects logs and metrics for increased visibility.\n\tOnly the tasks that Amazon ECS services create are supported with Service Connect.\n\tFor more information, see Service Connect in the Amazon Elastic Container Service Developer Guide.

" + "smithy.api#documentation": "

The application protocol that's used for the port mapping. This parameter only applies\n\t\t\tto Service Connect. We recommend that you set this parameter to be consistent with the\n\t\t\tprotocol that your application uses. If you set this parameter, Amazon ECS adds\n\t\t\tprotocol-specific connection handling to the Service Connect proxy. If you set this\n\t\t\tparameter, Amazon ECS adds protocol-specific telemetry in the Amazon ECS console and CloudWatch.

\n

If you don't set a value for this parameter, then TCP is used. However, Amazon ECS doesn't\n\t\t\tadd protocol-specific telemetry for TCP.

\n

\n appProtocol is immutable in a Service Connect service. Updating this\n\t\t\tfield requires a service deletion and redeployment.

\n

Tasks that run in a namespace can use short names to connect\n\tto services in the namespace. Tasks can connect to services across all of the clusters in the namespace.\n\tTasks connect through a managed proxy container\n\tthat collects logs and metrics for increased visibility.\n\tOnly the tasks that Amazon ECS services create are supported with Service Connect.\n\tFor more information, see Service Connect in the Amazon Elastic Container Service Developer Guide.

" } }, "containerPortRange": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

The port number range on the container that's bound to the dynamically mapped host port range.

\n

The following rules apply when you specify a containerPortRange:

\n
    \n
  • \n

    You must use either the bridge network mode or the awsvpc\n\t\t\t\t\tnetwork mode.

    \n
  • \n
  • \n

    This parameter is available for both the EC2 and Fargate launch types.

    \n
  • \n
  • \n

    This parameter is available for both the Linux and Windows operating systems.

    \n
  • \n
  • \n

    The container instance must have at least version 1.67.0 of the container agent\n\t\t\t\t\tand at least version 1.67.0-1 of the ecs-init package

    \n
  • \n
  • \n

    You can specify a maximum of 100 port ranges per container.

    \n
  • \n
  • \n

    You do not specify a hostPortRange. The value of the hostPortRange is set\n\t\t\t\t\tas follows:

    \n
      \n
    • \n

      For containers in a task with the awsvpc network mode,\n\t\t\t\t\t\t\tthe hostPortRange is set to the same value as the\n\t\t\t\t\t\t\t\tcontainerPortRange. This is a static mapping\n\t\t\t\t\t\t\tstrategy.

      \n
    • \n
    • \n

      For containers in a task with the bridge network mode, the Amazon ECS agent finds open host ports from the default ephemeral range and passes it to docker to bind them to the container ports.

      \n
    • \n
    \n
  • \n
  • \n

    The containerPortRange valid values are between 1 and\n\t\t\t\t\t65535.

    \n
  • \n
  • \n

    A port can only be included in one port mapping per container.

    \n
  • \n
  • \n

    You cannot specify overlapping port ranges.

    \n
  • \n
  • \n

    The first port in the range must be less than the last port in the range.

    \n
  • \n
  • \n

    Docker recommends that you turn off the docker-proxy in the Docker daemon config file when you have a large number of ports.

    \n

    For more information, see Issue #11185 on the GitHub website.

    \n

    For information about how to turn off the docker-proxy in the Docker daemon config file, see Docker daemon in the Amazon ECS Developer Guide.

    \n
  • \n
\n

You can call \n DescribeTasks\n to view the hostPortRange which\n\t\t\tare the host ports that are bound to the container ports.

" + "smithy.api#documentation": "

The port number range on the container that's bound to the dynamically mapped host\n\t\t\tport range.

\n

The following rules apply when you specify a containerPortRange:

\n
    \n
  • \n

    You must use either the bridge network mode or the awsvpc\n\t\t\t\t\tnetwork mode.

    \n
  • \n
  • \n

    This parameter is available for both the EC2 and Fargate launch types.

    \n
  • \n
  • \n

    This parameter is available for both the Linux and Windows operating systems.

    \n
  • \n
  • \n

    The container instance must have at least version 1.67.0 of the container agent\n\t\t\t\t\tand at least version 1.67.0-1 of the ecs-init package

    \n
  • \n
  • \n

    You can specify a maximum of 100 port ranges per container.

    \n
  • \n
  • \n

    You do not specify a hostPortRange. The value of the hostPortRange is set\n\t\t\t\t\tas follows:

    \n
      \n
    • \n

      For containers in a task with the awsvpc network mode,\n\t\t\t\t\t\t\tthe hostPortRange is set to the same value as the\n\t\t\t\t\t\t\t\tcontainerPortRange. This is a static mapping\n\t\t\t\t\t\t\tstrategy.

      \n
    • \n
    • \n

      For containers in a task with the bridge network mode, the Amazon ECS agent finds open host ports from the default ephemeral range and passes it to docker to bind them to the container ports.

      \n
    • \n
    \n
  • \n
  • \n

    The containerPortRange valid values are between 1 and\n\t\t\t\t\t65535.

    \n
  • \n
  • \n

    A port can only be included in one port mapping per container.

    \n
  • \n
  • \n

    You cannot specify overlapping port ranges.

    \n
  • \n
  • \n

    The first port in the range must be less than the last port in the range.

    \n
  • \n
  • \n

    Docker recommends that you turn off the docker-proxy in the Docker daemon config file when you have a large number of ports.

    \n

    For more information, see Issue #11185 on the GitHub website.

    \n

    For information about how to turn off the docker-proxy in the Docker daemon config file, see Docker daemon in the Amazon ECS Developer Guide.

    \n
  • \n
\n

You can call \n DescribeTasks\n to view the hostPortRange which\n\t\t\tare the host ports that are bound to the container ports.

" } } }, "traits": { - "smithy.api#documentation": "

Port mappings allow containers to access ports on the host container instance to send or receive\n\t\t\ttraffic. Port mappings are specified as part of the container definition.

\n

If you use containers in a task with the awsvpc or host network mode,\n\t\t\tspecify the exposed ports using containerPort. The hostPort can be left blank\n\t\t\tor it must be the same value as the containerPort.

\n

Most fields of this parameter (containerPort, hostPort,\n\t\t\t\tprotocol) maps to PortBindings in the docker container create command and\n\t\t\tthe --publish option to docker run. If the network mode of a task definition\n\t\t\tis set to host, host ports must either be undefined or match the container port in the\n\t\t\tport mapping.

\n \n

You can't expose the same container port for multiple protocols. If you attempt this, an error is\n\t\t\t\treturned.

\n
\n

After a task reaches the RUNNING status, manual and automatic host and container port\n\t\t\tassignments are visible in the networkBindings section of DescribeTasks API\n\t\t\tresponses.

" + "smithy.api#documentation": "

Port mappings allow containers to access ports on the host container instance to send\n\t\t\tor receive traffic. Port mappings are specified as part of the container\n\t\t\tdefinition.

\n

If you use containers in a task with the awsvpc or host\n\t\t\tnetwork mode, specify the exposed ports using containerPort. The\n\t\t\t\thostPort can be left blank or it must be the same value as the\n\t\t\t\tcontainerPort.

\n

Most fields of this parameter (containerPort, hostPort,\n\t\t\t\tprotocol) maps to PortBindings in the docker container\n\t\t\tcreate command and the --publish option to docker run. If the\n\t\t\tnetwork mode of a task definition is set to host, host ports must either be\n\t\t\tundefined or match the container port in the port mapping.

\n \n

You can't expose the same container port for multiple protocols. If you attempt\n\t\t\t\tthis, an error is returned.

\n
\n

After a task reaches the RUNNING status, manual and automatic host and\n\t\t\tcontainer port assignments are visible in the networkBindings section of\n\t\t\t\tDescribeTasks API\n\t\t\tresponses.

" } }, "com.amazonaws.ecs#PortMappingList": { @@ -8984,7 +8984,7 @@ "target": "com.amazonaws.ecs#Boolean", "traits": { "smithy.api#default": false, - "smithy.api#documentation": "

The protection status of the task. If scale-in protection is on for a task, the value is\n\t\t\t\ttrue. Otherwise, it is false.

" + "smithy.api#documentation": "

The protection status of the task. If scale-in protection is on for a task, the value\n\t\t\tis true. Otherwise, it is false.

" } }, "expirationDate": { @@ -8995,7 +8995,7 @@ } }, "traits": { - "smithy.api#documentation": "

An object representing the protection status details for a task. You can set the protection status\n\t\t\twith the UpdateTaskProtection API and get the status of tasks with the GetTaskProtection\n\t\t\tAPI.

" + "smithy.api#documentation": "

An object representing the protection status details for a task. You can set the\n\t\t\tprotection status with the UpdateTaskProtection API and get the status of tasks with the GetTaskProtection API.

" } }, "com.amazonaws.ecs#ProtectedTasks": { @@ -9023,12 +9023,12 @@ "properties": { "target": "com.amazonaws.ecs#ProxyConfigurationProperties", "traits": { - "smithy.api#documentation": "

The set of network configuration parameters to provide the Container Network Interface (CNI) plugin,\n\t\t\tspecified as key-value pairs.

\n
    \n
  • \n

    \n IgnoredUID - (Required) The user ID (UID) of the proxy container as\n\t\t\t\t\tdefined by the user parameter in a container definition. This is used to ensure\n\t\t\t\t\tthe proxy ignores its own traffic. If IgnoredGID is specified, this field can be\n\t\t\t\t\tempty.

    \n
  • \n
  • \n

    \n IgnoredGID - (Required) The group ID (GID) of the proxy container as\n\t\t\t\t\tdefined by the user parameter in a container definition. This is used to ensure\n\t\t\t\t\tthe proxy ignores its own traffic. If IgnoredUID is specified, this field can be\n\t\t\t\t\tempty.

    \n
  • \n
  • \n

    \n AppPorts - (Required) The list of ports that the application uses. Network\n\t\t\t\t\ttraffic to these ports is forwarded to the ProxyIngressPort and\n\t\t\t\t\t\tProxyEgressPort.

    \n
  • \n
  • \n

    \n ProxyIngressPort - (Required) Specifies the port that incoming traffic to\n\t\t\t\t\tthe AppPorts is directed to.

    \n
  • \n
  • \n

    \n ProxyEgressPort - (Required) Specifies the port that outgoing traffic from\n\t\t\t\t\tthe AppPorts is directed to.

    \n
  • \n
  • \n

    \n EgressIgnoredPorts - (Required) The egress traffic going to the specified\n\t\t\t\t\tports is ignored and not redirected to the ProxyEgressPort. It can be an empty\n\t\t\t\t\tlist.

    \n
  • \n
  • \n

    \n EgressIgnoredIPs - (Required) The egress traffic going to the specified IP\n\t\t\t\t\taddresses is ignored and not redirected to the ProxyEgressPort. It can be an empty\n\t\t\t\t\tlist.

    \n
  • \n
" + "smithy.api#documentation": "

The set of network configuration parameters to provide the Container Network Interface\n\t\t\t(CNI) plugin, specified as key-value pairs.

\n
    \n
  • \n

    \n IgnoredUID - (Required) The user ID (UID) of the proxy\n\t\t\t\t\tcontainer as defined by the user parameter in a container\n\t\t\t\t\tdefinition. This is used to ensure the proxy ignores its own traffic. If\n\t\t\t\t\t\tIgnoredGID is specified, this field can be empty.

    \n
  • \n
  • \n

    \n IgnoredGID - (Required) The group ID (GID) of the proxy\n\t\t\t\t\tcontainer as defined by the user parameter in a container\n\t\t\t\t\tdefinition. This is used to ensure the proxy ignores its own traffic. If\n\t\t\t\t\t\tIgnoredUID is specified, this field can be empty.

    \n
  • \n
  • \n

    \n AppPorts - (Required) The list of ports that the\n\t\t\t\t\tapplication uses. Network traffic to these ports is forwarded to the\n\t\t\t\t\t\tProxyIngressPort and ProxyEgressPort.

    \n
  • \n
  • \n

    \n ProxyIngressPort - (Required) Specifies the port that\n\t\t\t\t\tincoming traffic to the AppPorts is directed to.

    \n
  • \n
  • \n

    \n ProxyEgressPort - (Required) Specifies the port that\n\t\t\t\t\toutgoing traffic from the AppPorts is directed to.

    \n
  • \n
  • \n

    \n EgressIgnoredPorts - (Required) The egress traffic going to\n\t\t\t\t\tthe specified ports is ignored and not redirected to the\n\t\t\t\t\t\tProxyEgressPort. It can be an empty list.

    \n
  • \n
  • \n

    \n EgressIgnoredIPs - (Required) The egress traffic going to\n\t\t\t\t\tthe specified IP addresses is ignored and not redirected to the\n\t\t\t\t\t\tProxyEgressPort. It can be an empty list.

    \n
  • \n
" } } }, "traits": { - "smithy.api#documentation": "

The configuration details for the App Mesh proxy.

\n

For tasks that use the EC2 launch type, the container instances require at least\n\t\t\tversion 1.26.0 of the container agent and at least version 1.26.0-1 of the ecs-init\n\t\t\tpackage to use a proxy configuration. If your container instances are launched from the Amazon ECS optimized\n\t\t\tAMI version 20190301 or later, then they contain the required versions of the container\n\t\t\tagent and ecs-init. For more information, see Amazon ECS-optimized Linux AMI\n

" + "smithy.api#documentation": "

The configuration details for the App Mesh proxy.

\n

For tasks that use the EC2 launch type, the container instances require\n\t\t\tat least version 1.26.0 of the container agent and at least version 1.26.0-1 of the\n\t\t\t\tecs-init package to use a proxy configuration. If your container\n\t\t\tinstances are launched from the Amazon ECS optimized AMI version 20190301 or\n\t\t\tlater, then they contain the required versions of the container agent and\n\t\t\t\tecs-init. For more information, see Amazon ECS-optimized Linux AMI\n

" } }, "com.amazonaws.ecs#ProxyConfigurationProperties": { @@ -9068,7 +9068,7 @@ } ], "traits": { - "smithy.api#documentation": "

Modifies an account setting. Account settings are set on a per-Region basis.

\n

If you change the root user account setting, the default settings are reset for users and roles that do\n\t\t\tnot have specified individual account settings. For more information, see Account Settings in the\n\t\t\tAmazon Elastic Container Service Developer Guide.

", + "smithy.api#documentation": "

Modifies an account setting. Account settings are set on a per-Region basis.

\n

If you change the root user account setting, the default settings are reset for users and\n\t\t\troles that do not have specified individual account settings. For more information, see\n\t\t\t\tAccount\n\t\t\t\tSettings in the Amazon Elastic Container Service Developer Guide.

", "smithy.api#examples": [ { "title": "To modify the account settings for a specific IAM user or IAM role", @@ -9124,7 +9124,7 @@ } ], "traits": { - "smithy.api#documentation": "

Modifies an account setting for all users on an account for whom no individual account setting has\n\t\t\tbeen specified. Account settings are set on a per-Region basis.

", + "smithy.api#documentation": "

Modifies an account setting for all users on an account for whom no individual account\n\t\t\tsetting has been specified. Account settings are set on a per-Region basis.

", "smithy.api#examples": [ { "title": "To modify the default account settings for all IAM users or roles on an account", @@ -9150,14 +9150,14 @@ "name": { "target": "com.amazonaws.ecs#SettingName", "traits": { - "smithy.api#documentation": "

The resource name for which to modify the account setting.

\n

The following are the valid values for the account setting name.

\n
    \n
  • \n

    \n serviceLongArnFormat - When modified, the Amazon Resource Name (ARN) and\n\t\t\t\t\tresource ID format of the resource type for a specified user, role, or the root user for an\n\t\t\t\t\taccount is affected. The opt-in and opt-out account setting must be set for each Amazon ECS resource\n\t\t\t\t\tseparately. The ARN and resource ID format of a resource is defined by the opt-in status of\n\t\t\t\t\tthe user or role that created the resource. You must turn on this setting to use Amazon ECS features\n\t\t\t\t\tsuch as resource tagging.

    \n
  • \n
  • \n

    \n taskLongArnFormat - When modified, the Amazon Resource Name (ARN) and resource\n\t\t\t\t\tID format of the resource type for a specified user, role, or the root user for an account is\n\t\t\t\t\taffected. The opt-in and opt-out account setting must be set for each Amazon ECS resource\n\t\t\t\t\tseparately. The ARN and resource ID format of a resource is defined by the opt-in status of\n\t\t\t\t\tthe user or role that created the resource. You must turn on this setting to use Amazon ECS features\n\t\t\t\t\tsuch as resource tagging.

    \n
  • \n
  • \n

    \n containerInstanceLongArnFormat - When modified, the Amazon Resource Name (ARN)\n\t\t\t\t\tand resource ID format of the resource type for a specified user, role, or the root user for an\n\t\t\t\t\taccount is affected. The opt-in and opt-out account setting must be set for each Amazon ECS resource\n\t\t\t\t\tseparately. The ARN and resource ID format of a resource is defined by the opt-in status of\n\t\t\t\t\tthe user or role that created the resource. You must turn on this setting to use Amazon ECS features\n\t\t\t\t\tsuch as resource tagging.

    \n
  • \n
  • \n

    \n awsvpcTrunking - When modified, the elastic network interface (ENI) limit for\n\t\t\t\t\tany new container instances that support the feature is changed. If awsvpcTrunking\n\t\t\t\t\tis turned on, any new container instances that support the feature are launched have the\n\t\t\t\t\tincreased ENI limits available to them. For more information, see Elastic Network Interface\n\t\t\t\t\t\tTrunking in the Amazon Elastic Container Service Developer Guide.

    \n
  • \n
  • \n

    \n containerInsights - Container Insights with enhanced observability provides\n\t\t\t\t\tall the Container Insights metrics, plus additional task and container metrics.\n\t\t\t\t\tThis version supports enhanced observability for Amazon ECS clusters using the Amazon EC2\n\t\t\t\t\tand Fargate launch types. After you configure Container Insights with enhanced\n\t\t\t\t\tobservability on Amazon ECS, Container Insights auto-collects detailed infrastructure\n\t\t\t\t\ttelemetry from the cluster level down to the container level in your environment and\n\t\t\t\t\tdisplays these critical performance data in curated dashboards removing the\n\t\t\t\t\theavy lifting in observability set-up.

    \n

    To use Container Insights with enhanced observability, set the\n\t\t\t\t\t\tcontainerInsights account setting to\n\t\t\t\t\tenhanced.

    \n

    To use Container Insights, set the containerInsights account\n\t\t\t\t\tsetting to enabled.

    \n

    For more information, see Monitor Amazon ECS containers using Container Insights with enhanced observability in the Amazon Elastic Container Service Developer Guide.

    \n
  • \n
  • \n

    \n dualStackIPv6 - When turned on, when using a VPC in dual stack mode, your tasks\n\t\t\t\t\tusing the awsvpc network mode can have an IPv6 address assigned. For more\n\t\t\t\t\tinformation on using IPv6 with tasks launched on Amazon EC2 instances, see Using a VPC in dual-stack mode. For more information on using IPv6 with tasks\n\t\t\t\t\tlaunched on Fargate, see Using a VPC in dual-stack mode.

    \n
  • \n
  • \n

    \n fargateFIPSMode - If you specify fargateFIPSMode, Fargate FIPS\n\t\t\t\t\t140 compliance is affected.

    \n
  • \n
  • \n

    \n fargateTaskRetirementWaitPeriod - When Amazon Web Services determines that a security or\n\t\t\t\t\tinfrastructure update is needed for an Amazon ECS task hosted on Fargate, the tasks need to be\n\t\t\t\t\tstopped and new tasks launched to replace them. Use\n\t\t\t\t\t\tfargateTaskRetirementWaitPeriod to configure the wait time to retire a\n\t\t\t\t\tFargate task. For information about the Fargate tasks maintenance, see Amazon Web Services\n\t\t\t\t\t\tFargate task maintenance in the Amazon ECS Developer\n\t\t\t\t\tGuide.

    \n
  • \n
  • \n

    \n tagResourceAuthorization - Amazon ECS is introducing tagging authorization for\n\t\t\t\t\tresource creation. Users must have permissions for actions that create the resource, such as\n\t\t\t\t\t\tecsCreateCluster. If tags are specified when you create a resource, Amazon Web Services\n\t\t\t\t\tperforms additional authorization to verify if users or roles have permissions to create tags.\n\t\t\t\t\tTherefore, you must grant explicit permissions to use the ecs:TagResource action.\n\t\t\t\t\tFor more information, see Grant permission\n\t\t\t\t\t\tto tag resources on creation in the Amazon ECS Developer\n\t\t\t\t\tGuide.

    \n
  • \n
  • \n

    \n guardDutyActivate - The guardDutyActivate parameter is read-only in Amazon ECS and indicates whether\n\t\t\tAmazon ECS Runtime Monitoring is enabled or disabled by your security administrator in your\n\t\t\tAmazon ECS account. Amazon GuardDuty controls this account setting on your behalf. For more information, see Protecting Amazon ECS workloads with Amazon ECS Runtime Monitoring.

    \n
  • \n
", + "smithy.api#documentation": "

The resource name for which to modify the account setting.

\n

The following are the valid values for the account setting name.

\n
    \n
  • \n

    \n serviceLongArnFormat - When modified, the Amazon Resource Name\n\t\t\t\t\t(ARN) and resource ID format of the resource type for a specified user, role, or\n\t\t\t\t\tthe root user for an account is affected. The opt-in and opt-out account setting\n\t\t\t\t\tmust be set for each Amazon ECS resource separately. The ARN and resource ID format\n\t\t\t\t\tof a resource is defined by the opt-in status of the user or role that created\n\t\t\t\t\tthe resource. You must turn on this setting to use Amazon ECS features such as\n\t\t\t\t\tresource tagging.

    \n
  • \n
  • \n

    \n taskLongArnFormat - When modified, the Amazon Resource Name (ARN)\n\t\t\t\t\tand resource ID format of the resource type for a specified user, role, or the\n\t\t\t\t\troot user for an account is affected. The opt-in and opt-out account setting must\n\t\t\t\t\tbe set for each Amazon ECS resource separately. The ARN and resource ID format of a\n\t\t\t\t\tresource is defined by the opt-in status of the user or role that created the\n\t\t\t\t\tresource. You must turn on this setting to use Amazon ECS features such as resource\n\t\t\t\t\ttagging.

    \n
  • \n
  • \n

    \n containerInstanceLongArnFormat - When modified, the Amazon\n\t\t\t\t\tResource Name (ARN) and resource ID format of the resource type for a specified\n\t\t\t\t\tuser, role, or the root user for an account is affected. The opt-in and opt-out\n\t\t\t\t\taccount setting must be set for each Amazon ECS resource separately. The ARN and\n\t\t\t\t\tresource ID format of a resource is defined by the opt-in status of the user or\n\t\t\t\t\trole that created the resource. You must turn on this setting to use Amazon ECS\n\t\t\t\t\tfeatures such as resource tagging.

    \n
  • \n
  • \n

    \n awsvpcTrunking - When modified, the elastic network interface\n\t\t\t\t\t(ENI) limit for any new container instances that support the feature is changed.\n\t\t\t\t\tIf awsvpcTrunking is turned on, any new container instances that\n\t\t\t\t\tsupport the feature are launched have the increased ENI limits available to\n\t\t\t\t\tthem. For more information, see Elastic\n\t\t\t\t\t\tNetwork Interface Trunking in the Amazon Elastic Container Service Developer Guide.

    \n
  • \n
  • \n

    \n containerInsights - Container Insights with enhanced\n\t\t\t\t\tobservability provides all the Container Insights metrics, plus additional task\n\t\t\t\t\tand container metrics. This version supports enhanced observability for Amazon ECS\n\t\t\t\t\tclusters using the Amazon EC2 and Fargate launch types. After you configure\n\t\t\t\t\tContainer Insights with enhanced observability on Amazon ECS, Container Insights\n\t\t\t\t\tauto-collects detailed infrastructure telemetry from the cluster level down to\n\t\t\t\t\tthe container level in your environment and displays these critical performance\n\t\t\t\t\tdata in curated dashboards removing the heavy lifting in observability set-up.

    \n

    To use Container Insights with enhanced observability, set the\n\t\t\t\t\t\tcontainerInsights account setting to\n\t\t\t\t\tenhanced.

    \n

    To use Container Insights, set the containerInsights account\n\t\t\t\t\tsetting to enabled.

    \n

    For more information, see Monitor Amazon ECS containers using Container Insights with enhanced\n\t\t\t\t\t\tobservability in the Amazon Elastic Container Service Developer Guide.

    \n
  • \n
  • \n

    \n dualStackIPv6 - When turned on, when using a VPC in dual stack\n\t\t\t\t\tmode, your tasks using the awsvpc network mode can have an IPv6\n\t\t\t\t\taddress assigned. For more information on using IPv6 with tasks launched on\n\t\t\t\t\tAmazon EC2 instances, see Using a VPC in dual-stack mode. For more information on using IPv6\n\t\t\t\t\twith tasks launched on Fargate, see Using a VPC in dual-stack mode.

    \n
  • \n
  • \n

    \n fargateFIPSMode - If you specify fargateFIPSMode,\n\t\t\t\t\tFargate FIPS 140 compliance is affected.

    \n
  • \n
  • \n

    \n fargateTaskRetirementWaitPeriod - When Amazon Web Services determines that a\n\t\t\t\t\tsecurity or infrastructure update is needed for an Amazon ECS task hosted on\n\t\t\t\t\tFargate, the tasks need to be stopped and new tasks launched to replace them.\n\t\t\t\t\tUse fargateTaskRetirementWaitPeriod to configure the wait time to\n\t\t\t\t\tretire a Fargate task. For information about the Fargate tasks maintenance,\n\t\t\t\t\tsee Amazon Web Services Fargate\n\t\t\t\t\t\ttask maintenance in the Amazon ECS Developer\n\t\t\t\t\tGuide.

    \n
  • \n
  • \n

    \n tagResourceAuthorization - Amazon ECS is introducing tagging\n\t\t\t\t\tauthorization for resource creation. Users must have permissions for actions\n\t\t\t\t\tthat create the resource, such as ecsCreateCluster. If tags are\n\t\t\t\t\tspecified when you create a resource, Amazon Web Services performs additional authorization to\n\t\t\t\t\tverify if users or roles have permissions to create tags. Therefore, you must\n\t\t\t\t\tgrant explicit permissions to use the ecs:TagResource action. For\n\t\t\t\t\tmore information, see Grant permission to tag resources on creation in the\n\t\t\t\t\t\tAmazon ECS Developer Guide.

    \n
  • \n
  • \n

    \n guardDutyActivate - The guardDutyActivate parameter is read-only in Amazon ECS and indicates whether\n\t\t\tAmazon ECS Runtime Monitoring is enabled or disabled by your security administrator in your\n\t\t\tAmazon ECS account. Amazon GuardDuty controls this account setting on your behalf. For more information, see Protecting Amazon ECS workloads with Amazon ECS Runtime Monitoring.

    \n
  • \n
", "smithy.api#required": {} } }, "value": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

The account setting value for the specified principal ARN. Accepted values are\n\t\t\t\tenabled, disabled, on, enhanced,\n\t\t\tand off.

\n

When you specify fargateTaskRetirementWaitPeriod for the\n\t\t\t\tname, the following are the valid values:

\n
    \n
  • \n

    \n 0 - Amazon Web Services sends the notification, and immediately retires the affected\n\t\t\t\t\ttasks.

    \n
  • \n
  • \n

    \n 7 - Amazon Web Services sends the notification, and waits 7 calendar days to retire the\n\t\t\t\t\ttasks.

    \n
  • \n
  • \n

    \n 14 - Amazon Web Services sends the notification, and waits 14 calendar days to retire the\n\t\t\t\t\ttasks.

    \n
  • \n
", + "smithy.api#documentation": "

The account setting value for the specified principal ARN. Accepted values are\n\t\t\t\tenabled, disabled, on, enhanced,\n\t\t\tand off.

\n

When you specify fargateTaskRetirementWaitPeriod for the\n\t\t\t\tname, the following are the valid values:

\n
    \n
  • \n

    \n 0 - Amazon Web Services sends the notification, and immediately retires the\n\t\t\t\t\taffected tasks.

    \n
  • \n
  • \n

    \n 7 - Amazon Web Services sends the notification, and waits 7 calendar days to\n\t\t\t\t\tretire the tasks.

    \n
  • \n
  • \n

    \n 14 - Amazon Web Services sends the notification, and waits 14 calendar days to\n\t\t\t\t\tretire the tasks.

    \n
  • \n
", "smithy.api#required": {} } } @@ -9186,21 +9186,21 @@ "name": { "target": "com.amazonaws.ecs#SettingName", "traits": { - "smithy.api#documentation": "

The Amazon ECS account setting name to modify.

\n

The following are the valid values for the account setting name.

\n
    \n
  • \n

    \n serviceLongArnFormat - When modified, the Amazon Resource Name (ARN) and\n\t\t\t\t\tresource ID format of the resource type for a specified user, role, or the root user for an\n\t\t\t\t\taccount is affected. The opt-in and opt-out account setting must be set for each Amazon ECS resource\n\t\t\t\t\tseparately. The ARN and resource ID format of a resource is defined by the opt-in status of\n\t\t\t\t\tthe user or role that created the resource. You must turn on this setting to use Amazon ECS features\n\t\t\t\t\tsuch as resource tagging.

    \n
  • \n
  • \n

    \n taskLongArnFormat - When modified, the Amazon Resource Name (ARN) and resource\n\t\t\t\t\tID format of the resource type for a specified user, role, or the root user for an account is\n\t\t\t\t\taffected. The opt-in and opt-out account setting must be set for each Amazon ECS resource\n\t\t\t\t\tseparately. The ARN and resource ID format of a resource is defined by the opt-in status of\n\t\t\t\t\tthe user or role that created the resource. You must turn on this setting to use Amazon ECS features\n\t\t\t\t\tsuch as resource tagging.

    \n
  • \n
  • \n

    \n containerInstanceLongArnFormat - When modified, the Amazon Resource Name (ARN)\n\t\t\t\t\tand resource ID format of the resource type for a specified user, role, or the root user for an\n\t\t\t\t\taccount is affected. The opt-in and opt-out account setting must be set for each Amazon ECS resource\n\t\t\t\t\tseparately. The ARN and resource ID format of a resource is defined by the opt-in status of\n\t\t\t\t\tthe user or role that created the resource. You must turn on this setting to use Amazon ECS features\n\t\t\t\t\tsuch as resource tagging.

    \n
  • \n
  • \n

    \n awsvpcTrunking - When modified, the elastic network interface (ENI) limit for\n\t\t\t\t\tany new container instances that support the feature is changed. If awsvpcTrunking\n\t\t\t\t\tis turned on, any new container instances that support the feature are launched have the\n\t\t\t\t\tincreased ENI limits available to them. For more information, see Elastic Network Interface\n\t\t\t\t\t\tTrunking in the Amazon Elastic Container Service Developer Guide.

    \n
  • \n
  • \n

    \n containerInsights - Container Insights with enhanced observability provides\n\t\t\t\t\tall the Container Insights metrics, plus additional task and container metrics.\n\t\t\t\t\tThis version supports enhanced observability for Amazon ECS clusters using the Amazon EC2\n\t\t\t\t\tand Fargate launch types. After you configure Container Insights with enhanced\n\t\t\t\t\tobservability on Amazon ECS, Container Insights auto-collects detailed infrastructure\n\t\t\t\t\ttelemetry from the cluster level down to the container level in your environment and\n\t\t\t\t\tdisplays these critical performance data in curated dashboards removing the\n\t\t\t\t\theavy lifting in observability set-up.

    \n

    To use Container Insights with enhanced observability, set the\n\t\t\t\t\t\tcontainerInsights account setting to\n\t\t\t\t\tenhanced.

    \n

    To use Container Insights, set the containerInsights account setting to\n\t\t\t\t\t\tenabled.

    \n

    For more information, see Monitor Amazon ECS containers using Container Insights with enhanced observability in the Amazon Elastic Container Service Developer Guide.

    \n
  • \n
  • \n

    \n dualStackIPv6 - When turned on, when using a VPC in dual stack mode, your tasks\n\t\t\t\t\tusing the awsvpc network mode can have an IPv6 address assigned. For more\n\t\t\t\t\tinformation on using IPv6 with tasks launched on Amazon EC2 instances, see Using a VPC in dual-stack mode. For more information on using IPv6 with tasks\n\t\t\t\t\tlaunched on Fargate, see Using a VPC in dual-stack mode.

    \n
  • \n
  • \n

    \n fargateTaskRetirementWaitPeriod - When Amazon Web Services determines that a security or\n\t\t\t\t\tinfrastructure update is needed for an Amazon ECS task hosted on Fargate, the tasks need to be\n\t\t\t\t\tstopped and new tasks launched to replace them. Use\n\t\t\t\t\t\tfargateTaskRetirementWaitPeriod to configure the wait time to retire a\n\t\t\t\t\tFargate task. For information about the Fargate tasks maintenance, see Amazon Web Services\n\t\t\t\t\t\tFargate task maintenance in the Amazon ECS Developer\n\t\t\t\t\tGuide.

    \n
  • \n
  • \n

    \n tagResourceAuthorization - Amazon ECS is introducing tagging authorization for\n\t\t\t\t\tresource creation. Users must have permissions for actions that create the resource, such as\n\t\t\t\t\t\tecsCreateCluster. If tags are specified when you create a resource, Amazon Web Services\n\t\t\t\t\tperforms additional authorization to verify if users or roles have permissions to create tags.\n\t\t\t\t\tTherefore, you must grant explicit permissions to use the ecs:TagResource action.\n\t\t\t\t\tFor more information, see Grant permission\n\t\t\t\t\t\tto tag resources on creation in the Amazon ECS Developer\n\t\t\t\t\tGuide.

    \n
  • \n
  • \n

    \n guardDutyActivate - The guardDutyActivate parameter is read-only in Amazon ECS and indicates whether\n\t\t\tAmazon ECS Runtime Monitoring is enabled or disabled by your security administrator in your\n\t\t\tAmazon ECS account. Amazon GuardDuty controls this account setting on your behalf. For more information, see Protecting Amazon ECS workloads with Amazon ECS Runtime Monitoring.

    \n
  • \n
", + "smithy.api#documentation": "

The Amazon ECS account setting name to modify.

\n

The following are the valid values for the account setting name.

\n
    \n
  • \n

    \n serviceLongArnFormat - When modified, the Amazon Resource Name\n\t\t\t\t\t(ARN) and resource ID format of the resource type for a specified user, role, or\n\t\t\t\t\tthe root user for an account is affected. The opt-in and opt-out account setting\n\t\t\t\t\tmust be set for each Amazon ECS resource separately. The ARN and resource ID format\n\t\t\t\t\tof a resource is defined by the opt-in status of the user or role that created\n\t\t\t\t\tthe resource. You must turn on this setting to use Amazon ECS features such as\n\t\t\t\t\tresource tagging.

    \n
  • \n
  • \n

    \n taskLongArnFormat - When modified, the Amazon Resource Name (ARN)\n\t\t\t\t\tand resource ID format of the resource type for a specified user, role, or the\n\t\t\t\t\troot user for an account is affected. The opt-in and opt-out account setting must\n\t\t\t\t\tbe set for each Amazon ECS resource separately. The ARN and resource ID format of a\n\t\t\t\t\tresource is defined by the opt-in status of the user or role that created the\n\t\t\t\t\tresource. You must turn on this setting to use Amazon ECS features such as resource\n\t\t\t\t\ttagging.

    \n
  • \n
  • \n

    \n fargateFIPSMode - When turned on, you can run Fargate workloads\n\t\t\t\t\tin a manner that is compliant with Federal Information Processing Standard\n\t\t\t\t\t(FIPS-140). For more information, see Fargate\n\t\t\t\t\t\tFederal Information Processing Standard (FIPS-140).

    \n
  • \n
  • \n

    \n containerInstanceLongArnFormat - When modified, the Amazon\n\t\t\t\t\tResource Name (ARN) and resource ID format of the resource type for a specified\n\t\t\t\t\tuser, role, or the root user for an account is affected. The opt-in and opt-out\n\t\t\t\t\taccount setting must be set for each Amazon ECS resource separately. The ARN and\n\t\t\t\t\tresource ID format of a resource is defined by the opt-in status of the user or\n\t\t\t\t\trole that created the resource. You must turn on this setting to use Amazon ECS\n\t\t\t\t\tfeatures such as resource tagging.

    \n
  • \n
  • \n

    \n awsvpcTrunking - When modified, the elastic network interface\n\t\t\t\t\t(ENI) limit for any new container instances that support the feature is changed.\n\t\t\t\t\tIf awsvpcTrunking is turned on, any new container instances that\n\t\t\t\t\tsupport the feature are launched have the increased ENI limits available to\n\t\t\t\t\tthem. For more information, see Elastic\n\t\t\t\t\t\tNetwork Interface Trunking in the Amazon Elastic Container Service Developer Guide.

    \n
  • \n
  • \n

    \n containerInsights - Container Insights with enhanced\n\t\t\t\t\tobservability provides all the Container Insights metrics, plus additional task\n\t\t\t\t\tand container metrics. This version supports enhanced observability for Amazon ECS\n\t\t\t\t\tclusters using the Amazon EC2 and Fargate launch types. After you configure\n\t\t\t\t\tContainer Insights with enhanced observability on Amazon ECS, Container Insights\n\t\t\t\t\tauto-collects detailed infrastructure telemetry from the cluster level down to\n\t\t\t\t\tthe container level in your environment and displays these critical performance\n\t\t\t\t\tdata in curated dashboards removing the heavy lifting in observability set-up.

    \n

    To use Container Insights with enhanced observability, set the\n\t\t\t\t\t\tcontainerInsights account setting to\n\t\t\t\t\tenhanced.

    \n

    To use Container Insights, set the containerInsights account\n\t\t\t\t\tsetting to enabled.

    \n

    For more information, see Monitor Amazon ECS containers using Container Insights with enhanced\n\t\t\t\t\t\tobservability in the Amazon Elastic Container Service Developer Guide.

    \n
  • \n
  • \n

    \n dualStackIPv6 - When turned on, when using a VPC in dual stack\n\t\t\t\t\tmode, your tasks using the awsvpc network mode can have an IPv6\n\t\t\t\t\taddress assigned. For more information on using IPv6 with tasks launched on\n\t\t\t\t\tAmazon EC2 instances, see Using a VPC in dual-stack mode. For more information on using IPv6\n\t\t\t\t\twith tasks launched on Fargate, see Using a VPC in dual-stack mode.

    \n
  • \n
  • \n

    \n fargateTaskRetirementWaitPeriod - When Amazon Web Services determines that a\n\t\t\t\t\tsecurity or infrastructure update is needed for an Amazon ECS task hosted on\n\t\t\t\t\tFargate, the tasks need to be stopped and new tasks launched to replace them.\n\t\t\t\t\tUse fargateTaskRetirementWaitPeriod to configure the wait time to\n\t\t\t\t\tretire a Fargate task. For information about the Fargate tasks maintenance,\n\t\t\t\t\tsee Amazon Web Services Fargate\n\t\t\t\t\t\ttask maintenance in the Amazon ECS Developer\n\t\t\t\t\tGuide.

    \n
  • \n
  • \n

    \n tagResourceAuthorization - Amazon ECS is introducing tagging\n\t\t\t\t\tauthorization for resource creation. Users must have permissions for actions\n\t\t\t\t\tthat create the resource, such as ecsCreateCluster. If tags are\n\t\t\t\t\tspecified when you create a resource, Amazon Web Services performs additional authorization to\n\t\t\t\t\tverify if users or roles have permissions to create tags. Therefore, you must\n\t\t\t\t\tgrant explicit permissions to use the ecs:TagResource action. For\n\t\t\t\t\tmore information, see Grant permission to tag resources on creation in the\n\t\t\t\t\t\tAmazon ECS Developer Guide.

    \n
  • \n
  • \n

    \n guardDutyActivate - The guardDutyActivate parameter is read-only in Amazon ECS and indicates whether\n\t\t\tAmazon ECS Runtime Monitoring is enabled or disabled by your security administrator in your\n\t\t\tAmazon ECS account. Amazon GuardDuty controls this account setting on your behalf. For more information, see Protecting Amazon ECS workloads with Amazon ECS Runtime Monitoring.

    \n
  • \n
", "smithy.api#required": {} } }, "value": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

The account setting value for the specified principal ARN. Accepted values are\n\t\t\t\tenabled, disabled, enhanced,\n\t\t\t\ton, and off.

\n

When you specify fargateTaskRetirementWaitPeriod for the\n\t\t\t\tname, the following are the valid values:

\n
    \n
  • \n

    \n 0 - Amazon Web Services sends the notification, and immediately retires the affected\n\t\t\t\t\ttasks.

    \n
  • \n
  • \n

    \n 7 - Amazon Web Services sends the notification, and waits 7 calendar days to retire the\n\t\t\t\t\ttasks.

    \n
  • \n
  • \n

    \n 14 - Amazon Web Services sends the notification, and waits 14 calendar days to retire the\n\t\t\t\t\ttasks.

    \n
  • \n
", + "smithy.api#documentation": "

The account setting value for the specified principal ARN. Accepted values are\n\t\t\t\tenabled, disabled, enhanced, on,\n\t\t\tand off.

\n

When you specify fargateTaskRetirementWaitPeriod for the\n\t\t\t\tname, the following are the valid values:

\n
    \n
  • \n

    \n 0 - Amazon Web Services sends the notification, and immediately retires the\n\t\t\t\t\taffected tasks.

    \n
  • \n
  • \n

    \n 7 - Amazon Web Services sends the notification, and waits 7 calendar days to\n\t\t\t\t\tretire the tasks.

    \n
  • \n
  • \n

    \n 14 - Amazon Web Services sends the notification, and waits 14 calendar days to\n\t\t\t\t\tretire the tasks.

    \n
  • \n
", "smithy.api#required": {} } }, "principalArn": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

The ARN of the principal, which can be a user, role, or the root user. If you specify the root user, it\n\t\t\tmodifies the account setting for all users, roles, and the root user of the account unless a user or role\n\t\t\texplicitly overrides these settings. If this field is omitted, the setting is changed only for the\n\t\t\tauthenticated user.

\n \n

You must use the root user when you set the Fargate wait time\n\t\t\t\t\t(fargateTaskRetirementWaitPeriod).

\n

Federated users assume the account setting of the root user and can't have explicit account settings\n\t\t\t\tset for them.

\n
" + "smithy.api#documentation": "

The ARN of the principal, which can be a user, role, or the root user. If you specify\n\t\t\tthe root user, it modifies the account setting for all users, roles, and the root user of the\n\t\t\taccount unless a user or role explicitly overrides these settings. If this field is\n\t\t\tomitted, the setting is changed only for the authenticated user.

\n \n

You must use the root user when you set the Fargate wait time\n\t\t\t\t\t(fargateTaskRetirementWaitPeriod).

\n

Federated users assume the account setting of the root user and can't have explicit\n\t\t\t\taccount settings set for them.

\n
" } } }, @@ -9245,7 +9245,7 @@ } ], "traits": { - "smithy.api#documentation": "

Create or update an attribute on an Amazon ECS resource. If the attribute doesn't exist, it's created. If\n\t\t\tthe attribute exists, its value is replaced with the specified value. To delete an attribute, use\n\t\t\t\tDeleteAttributes. For more information, see Attributes in the Amazon Elastic Container Service Developer Guide.

" + "smithy.api#documentation": "

Create or update an attribute on an Amazon ECS resource. If the attribute doesn't exist,\n\t\t\tit's created. If the attribute exists, its value is replaced with the specified value.\n\t\t\tTo delete an attribute, use DeleteAttributes. For more information, see Attributes in the Amazon Elastic Container Service Developer Guide.

" } }, "com.amazonaws.ecs#PutAttributesRequest": { @@ -9254,13 +9254,13 @@ "cluster": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

The short name or full Amazon Resource Name (ARN) of the cluster that contains the resource to apply attributes.\n\t\t\tIf you do not specify a cluster, the default cluster is assumed.

" + "smithy.api#documentation": "

The short name or full Amazon Resource Name (ARN) of the cluster that contains the resource to apply\n\t\t\tattributes. If you do not specify a cluster, the default cluster is assumed.

" } }, "attributes": { "target": "com.amazonaws.ecs#Attributes", "traits": { - "smithy.api#documentation": "

The attributes to apply to your resource. You can specify up to 10 custom attributes for each\n\t\t\tresource. You can specify up to 10 attributes in a single call.

", + "smithy.api#documentation": "

The attributes to apply to your resource. You can specify up to 10 custom attributes\n\t\t\tfor each resource. You can specify up to 10 attributes in a single call.

", "smithy.api#required": {} } } @@ -9312,7 +9312,7 @@ } ], "traits": { - "smithy.api#documentation": "

Modifies the available capacity providers and the default capacity provider strategy for a\n\t\t\tcluster.

\n

You must specify both the available capacity providers and a default capacity provider strategy for\n\t\t\tthe cluster. If the specified cluster has existing capacity providers associated with it, you must\n\t\t\tspecify all existing capacity providers in addition to any new ones you want to add. Any existing\n\t\t\tcapacity providers that are associated with a cluster that are omitted from a PutClusterCapacityProviders API call will be disassociated with the cluster. You can only\n\t\t\tdisassociate an existing capacity provider from a cluster if it's not being used by any existing\n\t\t\ttasks.

\n

When creating a service or running a task on a cluster, if no capacity provider or launch type is\n\t\t\tspecified, then the cluster's default capacity provider strategy is used. We recommend that you define\n\t\t\ta default capacity provider strategy for your cluster. However, you must specify an empty array\n\t\t\t\t([]) to bypass defining a default strategy.

" + "smithy.api#documentation": "

Modifies the available capacity providers and the default capacity provider strategy\n\t\t\tfor a cluster.

\n

You must specify both the available capacity providers and a default capacity provider\n\t\t\tstrategy for the cluster. If the specified cluster has existing capacity providers\n\t\t\tassociated with it, you must specify all existing capacity providers in addition to any\n\t\t\tnew ones you want to add. Any existing capacity providers that are associated with a\n\t\t\tcluster that are omitted from a PutClusterCapacityProviders API call will be disassociated with the\n\t\t\tcluster. You can only disassociate an existing capacity provider from a cluster if it's\n\t\t\tnot being used by any existing tasks.

\n

When creating a service or running a task on a cluster, if no capacity provider or\n\t\t\tlaunch type is specified, then the cluster's default capacity provider strategy is used.\n\t\t\tWe recommend that you define a default capacity provider strategy for your cluster.\n\t\t\tHowever, you must specify an empty array ([]) to bypass defining a default\n\t\t\tstrategy.

" } }, "com.amazonaws.ecs#PutClusterCapacityProvidersRequest": { @@ -9321,21 +9321,21 @@ "cluster": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

The short name or full Amazon Resource Name (ARN) of the cluster to modify the capacity provider settings for. If you\n\t\t\tdon't specify a cluster, the default cluster is assumed.

", + "smithy.api#documentation": "

The short name or full Amazon Resource Name (ARN) of the cluster to modify the capacity provider\n\t\t\tsettings for. If you don't specify a cluster, the default cluster is assumed.

", "smithy.api#required": {} } }, "capacityProviders": { "target": "com.amazonaws.ecs#StringList", "traits": { - "smithy.api#documentation": "

The name of one or more capacity providers to associate with the cluster.

\n

If specifying a capacity provider that uses an Auto Scaling group, the capacity provider must already\n\t\t\tbe created. New capacity providers can be created with the CreateCapacityProvider\n\t\t\tAPI operation.

\n

To use a Fargate capacity provider, specify either the FARGATE or\n\t\t\t\tFARGATE_SPOT capacity providers. The Fargate capacity providers are available to all\n\t\t\taccounts and only need to be associated with a cluster to be used.

", + "smithy.api#documentation": "

The name of one or more capacity providers to associate with the cluster.

\n

If specifying a capacity provider that uses an Auto Scaling group, the capacity\n\t\t\tprovider must already be created. New capacity providers can be created with the CreateCapacityProvider API operation.

\n

To use a Fargate capacity provider, specify either the FARGATE or\n\t\t\t\tFARGATE_SPOT capacity providers. The Fargate capacity providers are\n\t\t\tavailable to all accounts and only need to be associated with a cluster to be\n\t\t\tused.

", "smithy.api#required": {} } }, "defaultCapacityProviderStrategy": { "target": "com.amazonaws.ecs#CapacityProviderStrategy", "traits": { - "smithy.api#documentation": "

The capacity provider strategy to use by default for the cluster.

\n

When creating a service or running a task on a cluster, if no capacity provider or launch type is\n\t\t\tspecified then the default capacity provider strategy for the cluster is used.

\n

A capacity provider strategy consists of one or more capacity providers along with the\n\t\t\t\tbase and weight to assign to them. A capacity provider must be associated\n\t\t\twith the cluster to be used in a capacity provider strategy. The PutClusterCapacityProviders API is used to associate a capacity provider with a cluster.\n\t\t\tOnly capacity providers with an ACTIVE or UPDATING status can be used.

\n

If specifying a capacity provider that uses an Auto Scaling group, the capacity provider must already\n\t\t\tbe created. New capacity providers can be created with the CreateCapacityProvider\n\t\t\tAPI operation.

\n

To use a Fargate capacity provider, specify either the FARGATE or\n\t\t\t\tFARGATE_SPOT capacity providers. The Fargate capacity providers are available to all\n\t\t\taccounts and only need to be associated with a cluster to be used.

", + "smithy.api#documentation": "

The capacity provider strategy to use by default for the cluster.

\n

When creating a service or running a task on a cluster, if no capacity provider or\n\t\t\tlaunch type is specified then the default capacity provider strategy for the cluster is\n\t\t\tused.

\n

A capacity provider strategy consists of one or more capacity providers along with the\n\t\t\t\tbase and weight to assign to them. A capacity provider\n\t\t\tmust be associated with the cluster to be used in a capacity provider strategy. The\n\t\t\t\tPutClusterCapacityProviders API is used to associate a capacity provider\n\t\t\twith a cluster. Only capacity providers with an ACTIVE or\n\t\t\t\tUPDATING status can be used.

\n

If specifying a capacity provider that uses an Auto Scaling group, the capacity\n\t\t\tprovider must already be created. New capacity providers can be created with the CreateCapacityProvider API operation.

\n

To use a Fargate capacity provider, specify either the FARGATE or\n\t\t\t\tFARGATE_SPOT capacity providers. The Fargate capacity providers are\n\t\t\tavailable to all accounts and only need to be associated with a cluster to be\n\t\t\tused.

", "smithy.api#required": {} } } @@ -9378,7 +9378,7 @@ } ], "traits": { - "smithy.api#documentation": "\n

This action is only used by the Amazon ECS agent, and it is not intended for use outside of the agent.

\n
\n

Registers an EC2 instance into the specified cluster. This instance becomes available to place\n\t\t\tcontainers on.

" + "smithy.api#documentation": "\n

This action is only used by the Amazon ECS agent, and it is not intended for use outside of the agent.

\n
\n

Registers an EC2 instance into the specified cluster. This instance becomes available\n\t\t\tto place containers on.

" } }, "com.amazonaws.ecs#RegisterContainerInstanceRequest": { @@ -9387,19 +9387,19 @@ "cluster": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

The short name or full Amazon Resource Name (ARN) of the cluster to register your container instance with.\n\t\t\tIf you do not specify a cluster, the default cluster is assumed.

" + "smithy.api#documentation": "

The short name or full Amazon Resource Name (ARN) of the cluster to register your container instance\n\t\t\twith. If you do not specify a cluster, the default cluster is assumed.

" } }, "instanceIdentityDocument": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

The instance identity document for the EC2 instance to register. This document can be found by\n\t\t\trunning the following command from the instance: curl\n\t\t\t\thttp://169.254.169.254/latest/dynamic/instance-identity/document/\n

" + "smithy.api#documentation": "

The instance identity document for the EC2 instance to register. This document can be\n\t\t\tfound by running the following command from the instance: curl\n\t\t\t\thttp://169.254.169.254/latest/dynamic/instance-identity/document/\n

" } }, "instanceIdentityDocumentSignature": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

The instance identity document signature for the EC2 instance to register. This signature can be\n\t\t\tfound by running the following command from the instance: curl\n\t\t\t\thttp://169.254.169.254/latest/dynamic/instance-identity/signature/\n

" + "smithy.api#documentation": "

The instance identity document signature for the EC2 instance to register. This\n\t\t\tsignature can be found by running the following command from the instance: curl\n\t\t\t\thttp://169.254.169.254/latest/dynamic/instance-identity/signature/\n

" } }, "totalResources": { @@ -9411,7 +9411,7 @@ "versionInfo": { "target": "com.amazonaws.ecs#VersionInfo", "traits": { - "smithy.api#documentation": "

The version information for the Amazon ECS container agent and Docker daemon that runs on the container\n\t\t\tinstance.

" + "smithy.api#documentation": "

The version information for the Amazon ECS container agent and Docker daemon that runs on\n\t\t\tthe container instance.

" } }, "containerInstanceArn": { @@ -9429,13 +9429,13 @@ "platformDevices": { "target": "com.amazonaws.ecs#PlatformDevices", "traits": { - "smithy.api#documentation": "

The devices that are available on the container instance. The only supported device type is a\n\t\t\tGPU.

" + "smithy.api#documentation": "

The devices that are available on the container instance. The only supported device\n\t\t\ttype is a GPU.

" } }, "tags": { "target": "com.amazonaws.ecs#Tags", "traits": { - "smithy.api#documentation": "

The metadata that you apply to the container instance to help you categorize and organize them. Each\n\t\t\ttag consists of a key and an optional value. You define both.

\n

The following basic restrictions apply to tags:

\n
    \n
  • \n

    Maximum number of tags per resource - 50

    \n
  • \n
  • \n

    For each resource, each tag key must be unique, and each tag key can have only\n one value.

    \n
  • \n
  • \n

    Maximum key length - 128 Unicode characters in UTF-8

    \n
  • \n
  • \n

    Maximum value length - 256 Unicode characters in UTF-8

    \n
  • \n
  • \n

    If your tagging schema is used across multiple services and resources,\n remember that other services may have restrictions on allowed characters.\n Generally allowed characters are: letters, numbers, and spaces representable in\n UTF-8, and the following characters: + - = . _ : / @.

    \n
  • \n
  • \n

    Tag keys and values are case-sensitive.

    \n
  • \n
  • \n

    Do not use aws:, AWS:, or any upper or lowercase\n combination of such as a prefix for either keys or values as it is reserved for\n Amazon Web Services use. You cannot edit or delete tag keys or values with this prefix. Tags with\n this prefix do not count against your tags per resource limit.

    \n
  • \n
" + "smithy.api#documentation": "

The metadata that you apply to the container instance to help you categorize and\n\t\t\torganize them. Each tag consists of a key and an optional value. You define both.

\n

The following basic restrictions apply to tags:

\n
    \n
  • \n

    Maximum number of tags per resource - 50

    \n
  • \n
  • \n

    For each resource, each tag key must be unique, and each tag key can have only\n one value.

    \n
  • \n
  • \n

    Maximum key length - 128 Unicode characters in UTF-8

    \n
  • \n
  • \n

    Maximum value length - 256 Unicode characters in UTF-8

    \n
  • \n
  • \n

    If your tagging schema is used across multiple services and resources,\n remember that other services may have restrictions on allowed characters.\n Generally allowed characters are: letters, numbers, and spaces representable in\n UTF-8, and the following characters: + - = . _ : / @.

    \n
  • \n
  • \n

    Tag keys and values are case-sensitive.

    \n
  • \n
  • \n

    Do not use aws:, AWS:, or any upper or lowercase\n combination of such as a prefix for either keys or values as it is reserved for\n Amazon Web Services use. You cannot edit or delete tag keys or values with this prefix. Tags with\n this prefix do not count against your tags per resource limit.

    \n
  • \n
" } } }, @@ -9477,7 +9477,7 @@ } ], "traits": { - "smithy.api#documentation": "

Registers a new task definition from the supplied family and\n\t\t\t\tcontainerDefinitions. Optionally, you can add data volumes to your containers with the\n\t\t\t\tvolumes parameter. For more information about task definition parameters and defaults,\n\t\t\tsee Amazon ECS Task\n\t\t\t\tDefinitions in the Amazon Elastic Container Service Developer Guide.

\n

You can specify a role for your task with the taskRoleArn parameter. When you specify a\n\t\t\trole for a task, its containers can then use the latest versions of the CLI or SDKs to make API\n\t\t\trequests to the Amazon Web Services services that are specified in the policy that's associated with the role. For\n\t\t\tmore information, see IAM Roles for Tasks in the\n\t\t\tAmazon Elastic Container Service Developer Guide.

\n

You can specify a Docker networking mode for the containers in your task definition with the\n\t\t\t\tnetworkMode parameter. If you specify the awsvpc network mode, the task\n\t\t\tis allocated an elastic network interface, and you must specify a NetworkConfiguration when\n\t\t\tyou create a service or run a task with the task definition. For more information, see Task\n\t\t\t\tNetworking in the Amazon Elastic Container Service Developer Guide.

", + "smithy.api#documentation": "

Registers a new task definition from the supplied family and\n\t\t\t\tcontainerDefinitions. Optionally, you can add data volumes to your\n\t\t\tcontainers with the volumes parameter. For more information about task\n\t\t\tdefinition parameters and defaults, see Amazon ECS Task\n\t\t\t\tDefinitions in the Amazon Elastic Container Service Developer Guide.

\n

You can specify a role for your task with the taskRoleArn parameter. When\n\t\t\tyou specify a role for a task, its containers can then use the latest versions of the\n\t\t\tCLI or SDKs to make API requests to the Amazon Web Services services that are specified in the\n\t\t\tpolicy that's associated with the role. For more information, see IAM\n\t\t\t\tRoles for Tasks in the Amazon Elastic Container Service Developer Guide.

\n

You can specify a Docker networking mode for the containers in your task definition\n\t\t\twith the networkMode parameter. If you specify the awsvpc\n\t\t\tnetwork mode, the task is allocated an elastic network interface, and you must specify a\n\t\t\t\tNetworkConfiguration when you create a service or run a task with the task\n\t\t\tdefinition. For more information, see Task Networking\n\t\t\tin the Amazon Elastic Container Service Developer Guide.

", "smithy.api#examples": [ { "title": "To register a task definition", @@ -9535,14 +9535,14 @@ "family": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

You must specify a family for a task definition. You can use it track multiple versions\n\t\t\tof the same task definition. The family is used as a name for your task definition.\n\t\t\tUp to 255 letters (uppercase and lowercase), numbers, underscores, and hyphens are allowed.

", + "smithy.api#documentation": "

You must specify a family for a task definition. You can use it track\n\t\t\tmultiple versions of the same task definition. The family is used as a name\n\t\t\tfor your task definition. Up to 255 letters (uppercase and lowercase), numbers, underscores, and hyphens are allowed.

", "smithy.api#required": {} } }, "taskRoleArn": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

The short name or full Amazon Resource Name (ARN) of the IAM role that containers in this task can assume. All\n\t\t\tcontainers in this task are granted the permissions that are specified in this role. For more\n\t\t\tinformation, see IAM Roles for Tasks in the Amazon Elastic Container Service Developer Guide.

" + "smithy.api#documentation": "

The short name or full Amazon Resource Name (ARN) of the IAM role that containers in this task can\n\t\t\tassume. All containers in this task are granted the permissions that are specified in\n\t\t\tthis role. For more information, see IAM Roles for\n\t\t\t\tTasks in the Amazon Elastic Container Service Developer Guide.

" } }, "executionRoleArn": { @@ -9560,44 +9560,44 @@ "containerDefinitions": { "target": "com.amazonaws.ecs#ContainerDefinitions", "traits": { - "smithy.api#documentation": "

A list of container definitions in JSON format that describe the different containers that make up\n\t\t\tyour task.

", + "smithy.api#documentation": "

A list of container definitions in JSON format that describe the different containers\n\t\t\tthat make up your task.

", "smithy.api#required": {} } }, "volumes": { "target": "com.amazonaws.ecs#VolumeList", "traits": { - "smithy.api#documentation": "

A list of volume definitions in JSON format that containers in your task might use.

" + "smithy.api#documentation": "

A list of volume definitions in JSON format that containers in your task might\n\t\t\tuse.

" } }, "placementConstraints": { "target": "com.amazonaws.ecs#TaskDefinitionPlacementConstraints", "traits": { - "smithy.api#documentation": "

An array of placement constraint objects to use for the task. You can specify a maximum of 10\n\t\t\tconstraints for each task. This limit includes constraints in the task definition and those specified\n\t\t\tat runtime.

" + "smithy.api#documentation": "

An array of placement constraint objects to use for the task. You can specify a\n\t\t\tmaximum of 10 constraints for each task. This limit includes constraints in the task\n\t\t\tdefinition and those specified at runtime.

" } }, "requiresCompatibilities": { "target": "com.amazonaws.ecs#CompatibilityList", "traits": { - "smithy.api#documentation": "

The task launch type that Amazon ECS validates the task definition against. A client exception is returned\n\t\t\tif the task definition doesn't validate against the compatibilities specified. If no value is\n\t\t\tspecified, the parameter is omitted from the response.

" + "smithy.api#documentation": "

The task launch type that Amazon ECS validates the task definition against. A client\n\t\t\texception is returned if the task definition doesn't validate against the\n\t\t\tcompatibilities specified. If no value is specified, the parameter is omitted from the\n\t\t\tresponse.

" } }, "cpu": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

The number of CPU units used by the task. It can be expressed as an integer using CPU units (for\n\t\t\texample, 1024) or as a string using vCPUs (for example, 1 vCPU or 1\n\t\t\t\tvcpu) in a task definition. String values are converted to an integer indicating the CPU\n\t\t\tunits when the task definition is registered.

\n \n

Task-level CPU and memory parameters are ignored for Windows containers. We recommend specifying\n\t\t\t\tcontainer-level resources for Windows containers.

\n
\n

If you're using the EC2 launch type, this field is optional. Supported values are\n\t\t\tbetween 128 CPU units (0.125 vCPUs) and 10240 CPU units\n\t\t\t\t(10 vCPUs). If you do not specify a value, the parameter is ignored.

\n

If you're using the Fargate launch type, this field is required and you must use one of\n\t\t\tthe following values, which determines your range of supported values for the memory\n\t\t\tparameter:

\n

The CPU units cannot be less than 1 vCPU when you use Windows containers on\n\t\t\tFargate.

\n
    \n
  • \n

    256 (.25 vCPU) - Available memory values: 512 (0.5 GB), 1024 (1 GB), 2048 (2 GB)

    \n
  • \n
  • \n

    512 (.5 vCPU) - Available memory values: 1024 (1 GB), 2048 (2 GB), 3072 (3 GB), 4096 (4 GB)

    \n
  • \n
  • \n

    1024 (1 vCPU) - Available memory values: 2048 (2 GB), 3072 (3 GB), 4096 (4 GB), 5120 (5 GB), 6144 (6 GB), 7168 (7 GB), 8192 (8 GB)

    \n
  • \n
  • \n

    2048 (2 vCPU) - Available memory values: 4096 (4 GB) and 16384 (16 GB) in increments of 1024 (1 GB)

    \n
  • \n
  • \n

    4096 (4 vCPU) - Available memory values: 8192 (8 GB) and 30720 (30 GB) in increments of 1024 (1 GB)

    \n
  • \n
  • \n

    8192 (8 vCPU) - Available memory values: 16 GB and 60 GB in 4 GB increments

    \n

    This option requires Linux platform 1.4.0 or\n later.

    \n
  • \n
  • \n

    16384 (16vCPU) - Available memory values: 32GB and 120 GB in 8 GB increments

    \n

    This option requires Linux platform 1.4.0 or\n later.

    \n
  • \n
" + "smithy.api#documentation": "

The number of CPU units used by the task. It can be expressed as an integer using CPU\n\t\t\tunits (for example, 1024) or as a string using vCPUs (for example, 1\n\t\t\t\tvCPU or 1 vcpu) in a task definition. String values are\n\t\t\tconverted to an integer indicating the CPU units when the task definition is\n\t\t\tregistered.

\n \n

Task-level CPU and memory parameters are ignored for Windows containers. We\n\t\t\t\trecommend specifying container-level resources for Windows containers.

\n
\n

If you're using the EC2 launch type, this field is optional. Supported\n\t\t\tvalues are between 128 CPU units (0.125 vCPUs) and\n\t\t\t\t10240 CPU units (10 vCPUs). If you do not specify a value,\n\t\t\tthe parameter is ignored.

\n

If you're using the Fargate launch type, this field is required and you\n\t\t\tmust use one of the following values, which determines your range of supported values\n\t\t\tfor the memory parameter:

\n

The CPU units cannot be less than 1 vCPU when you use Windows containers on\n\t\t\tFargate.

\n
    \n
  • \n

    256 (.25 vCPU) - Available memory values: 512 (0.5 GB), 1024 (1 GB), 2048 (2 GB)

    \n
  • \n
  • \n

    512 (.5 vCPU) - Available memory values: 1024 (1 GB), 2048 (2 GB), 3072 (3 GB), 4096 (4 GB)

    \n
  • \n
  • \n

    1024 (1 vCPU) - Available memory values: 2048 (2 GB), 3072 (3 GB), 4096 (4 GB), 5120 (5 GB), 6144 (6 GB), 7168 (7 GB), 8192 (8 GB)

    \n
  • \n
  • \n

    2048 (2 vCPU) - Available memory values: 4096 (4 GB) and 16384 (16 GB) in increments of 1024 (1 GB)

    \n
  • \n
  • \n

    4096 (4 vCPU) - Available memory values: 8192 (8 GB) and 30720 (30 GB) in increments of 1024 (1 GB)

    \n
  • \n
  • \n

    8192 (8 vCPU) - Available memory values: 16 GB and 60 GB in 4 GB increments

    \n

    This option requires Linux platform 1.4.0 or\n later.

    \n
  • \n
  • \n

    16384 (16vCPU) - Available memory values: 32GB and 120 GB in 8 GB increments

    \n

    This option requires Linux platform 1.4.0 or\n later.

    \n
  • \n
" } }, "memory": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

The amount of memory (in MiB) used by the task. It can be expressed as an integer using MiB (for\n\t\t\texample ,1024) or as a string using GB (for example, 1GB or 1\n\t\t\tGB) in a task definition. String values are converted to an integer indicating the MiB when the\n\t\t\ttask definition is registered.

\n \n

Task-level CPU and memory parameters are ignored for Windows containers. We recommend specifying\n\t\t\t\tcontainer-level resources for Windows containers.

\n
\n

If using the EC2 launch type, this field is optional.

\n

If using the Fargate launch type, this field is required and you must use one of the\n\t\t\tfollowing values. This determines your range of supported values for the cpu\n\t\t\tparameter.

\n

The CPU units cannot be less than 1 vCPU when you use Windows containers on\n\t\t\tFargate.

\n
    \n
  • \n

    512 (0.5 GB), 1024 (1 GB), 2048 (2 GB) - Available cpu values: 256 (.25 vCPU)

    \n
  • \n
  • \n

    1024 (1 GB), 2048 (2 GB), 3072 (3 GB), 4096 (4 GB) - Available cpu values: 512 (.5 vCPU)

    \n
  • \n
  • \n

    2048 (2 GB), 3072 (3 GB), 4096 (4 GB), 5120 (5 GB), 6144 (6 GB), 7168 (7 GB), 8192 (8 GB) - Available cpu values: 1024 (1 vCPU)

    \n
  • \n
  • \n

    Between 4096 (4 GB) and 16384 (16 GB) in increments of 1024 (1 GB) - Available cpu values: 2048 (2 vCPU)

    \n
  • \n
  • \n

    Between 8192 (8 GB) and 30720 (30 GB) in increments of 1024 (1 GB) - Available cpu values: 4096 (4 vCPU)

    \n
  • \n
  • \n

    Between 16 GB and 60 GB in 4 GB increments - Available cpu values: 8192 (8 vCPU)

    \n

    This option requires Linux platform 1.4.0 or\n later.

    \n
  • \n
  • \n

    Between 32GB and 120 GB in 8 GB increments - Available cpu values: 16384 (16 vCPU)

    \n

    This option requires Linux platform 1.4.0 or\n later.

    \n
  • \n
" + "smithy.api#documentation": "

The amount of memory (in MiB) used by the task. It can be expressed as an integer\n\t\t\tusing MiB (for example ,1024) or as a string using GB (for example,\n\t\t\t\t1GB or 1 GB) in a task definition. String values are\n\t\t\tconverted to an integer indicating the MiB when the task definition is\n\t\t\tregistered.

\n \n

Task-level CPU and memory parameters are ignored for Windows containers. We\n\t\t\t\trecommend specifying container-level resources for Windows containers.

\n
\n

If using the EC2 launch type, this field is optional.

\n

If using the Fargate launch type, this field is required and you must\n\t\t\tuse one of the following values. This determines your range of supported values for the\n\t\t\t\tcpu parameter.

\n

The CPU units cannot be less than 1 vCPU when you use Windows containers on\n\t\t\tFargate.

\n
    \n
  • \n

    512 (0.5 GB), 1024 (1 GB), 2048 (2 GB) - Available cpu values: 256 (.25 vCPU)

    \n
  • \n
  • \n

    1024 (1 GB), 2048 (2 GB), 3072 (3 GB), 4096 (4 GB) - Available cpu values: 512 (.5 vCPU)

    \n
  • \n
  • \n

    2048 (2 GB), 3072 (3 GB), 4096 (4 GB), 5120 (5 GB), 6144 (6 GB), 7168 (7 GB), 8192 (8 GB) - Available cpu values: 1024 (1 vCPU)

    \n
  • \n
  • \n

    Between 4096 (4 GB) and 16384 (16 GB) in increments of 1024 (1 GB) - Available cpu values: 2048 (2 vCPU)

    \n
  • \n
  • \n

    Between 8192 (8 GB) and 30720 (30 GB) in increments of 1024 (1 GB) - Available cpu values: 4096 (4 vCPU)

    \n
  • \n
  • \n

    Between 16 GB and 60 GB in 4 GB increments - Available cpu values: 8192 (8 vCPU)

    \n

    This option requires Linux platform 1.4.0 or\n later.

    \n
  • \n
  • \n

    Between 32GB and 120 GB in 8 GB increments - Available cpu values: 16384 (16 vCPU)

    \n

    This option requires Linux platform 1.4.0 or\n later.

    \n
  • \n
" } }, "tags": { "target": "com.amazonaws.ecs#Tags", "traits": { - "smithy.api#documentation": "

The metadata that you apply to the task definition to help you categorize and organize them. Each tag\n\t\t\tconsists of a key and an optional value. You define both of them.

\n

The following basic restrictions apply to tags:

\n
    \n
  • \n

    Maximum number of tags per resource - 50

    \n
  • \n
  • \n

    For each resource, each tag key must be unique, and each tag key can have only\n one value.

    \n
  • \n
  • \n

    Maximum key length - 128 Unicode characters in UTF-8

    \n
  • \n
  • \n

    Maximum value length - 256 Unicode characters in UTF-8

    \n
  • \n
  • \n

    If your tagging schema is used across multiple services and resources,\n remember that other services may have restrictions on allowed characters.\n Generally allowed characters are: letters, numbers, and spaces representable in\n UTF-8, and the following characters: + - = . _ : / @.

    \n
  • \n
  • \n

    Tag keys and values are case-sensitive.

    \n
  • \n
  • \n

    Do not use aws:, AWS:, or any upper or lowercase\n combination of such as a prefix for either keys or values as it is reserved for\n Amazon Web Services use. You cannot edit or delete tag keys or values with this prefix. Tags with\n this prefix do not count against your tags per resource limit.

    \n
  • \n
" + "smithy.api#documentation": "

The metadata that you apply to the task definition to help you categorize and organize\n\t\t\tthem. Each tag consists of a key and an optional value. You define both of them.

\n

The following basic restrictions apply to tags:

\n
    \n
  • \n

    Maximum number of tags per resource - 50

    \n
  • \n
  • \n

    For each resource, each tag key must be unique, and each tag key can have only\n one value.

    \n
  • \n
  • \n

    Maximum key length - 128 Unicode characters in UTF-8

    \n
  • \n
  • \n

    Maximum value length - 256 Unicode characters in UTF-8

    \n
  • \n
  • \n

    If your tagging schema is used across multiple services and resources,\n remember that other services may have restrictions on allowed characters.\n Generally allowed characters are: letters, numbers, and spaces representable in\n UTF-8, and the following characters: + - = . _ : / @.

    \n
  • \n
  • \n

    Tag keys and values are case-sensitive.

    \n
  • \n
  • \n

    Do not use aws:, AWS:, or any upper or lowercase\n combination of such as a prefix for either keys or values as it is reserved for\n Amazon Web Services use. You cannot edit or delete tag keys or values with this prefix. Tags with\n this prefix do not count against your tags per resource limit.

    \n
  • \n
" } }, "pidMode": { @@ -9615,7 +9615,7 @@ "proxyConfiguration": { "target": "com.amazonaws.ecs#ProxyConfiguration", "traits": { - "smithy.api#documentation": "

The configuration details for the App Mesh proxy.

\n

For tasks hosted on Amazon EC2 instances, the container instances require at least version\n\t\t\t\t1.26.0 of the container agent and at least version 1.26.0-1 of the\n\t\t\t\tecs-init package to use a proxy configuration. If your container instances are\n\t\t\tlaunched from the Amazon ECS-optimized AMI version 20190301 or later, then they contain\n\t\t\tthe required versions of the container agent and ecs-init. For more information, see\n\t\t\t\tAmazon ECS-optimized AMI versions in the Amazon Elastic Container Service Developer Guide.

" + "smithy.api#documentation": "

The configuration details for the App Mesh proxy.

\n

For tasks hosted on Amazon EC2 instances, the container instances require at least version\n\t\t\t\t1.26.0 of the container agent and at least version\n\t\t\t\t1.26.0-1 of the ecs-init package to use a proxy\n\t\t\tconfiguration. If your container instances are launched from the Amazon ECS-optimized\n\t\t\tAMI version 20190301 or later, then they contain the required versions of\n\t\t\tthe container agent and ecs-init. For more information, see Amazon ECS-optimized AMI versions in the Amazon Elastic Container Service Developer Guide.

" } }, "inferenceAccelerators": { @@ -9627,13 +9627,19 @@ "ephemeralStorage": { "target": "com.amazonaws.ecs#EphemeralStorage", "traits": { - "smithy.api#documentation": "

The amount of ephemeral storage to allocate for the task. This parameter is used to expand the total\n\t\t\tamount of ephemeral storage available, beyond the default amount, for tasks hosted on Fargate. For\n\t\t\tmore information, see Using data volumes in tasks\n\t\t\tin the Amazon ECS Developer Guide.

\n \n

For tasks using the Fargate launch type, the task requires the following\n\t\t\t\tplatforms:

\n
    \n
  • \n

    Linux platform version 1.4.0 or later.

    \n
  • \n
  • \n

    Windows platform version 1.0.0 or later.

    \n
  • \n
\n
" + "smithy.api#documentation": "

The amount of ephemeral storage to allocate for the task. This parameter is used to\n\t\t\texpand the total amount of ephemeral storage available, beyond the default amount, for\n\t\t\ttasks hosted on Fargate. For more information, see Using data volumes in\n\t\t\t\ttasks in the Amazon ECS Developer Guide.

\n \n

For tasks using the Fargate launch type, the task requires the\n\t\t\t\tfollowing platforms:

\n
    \n
  • \n

    Linux platform version 1.4.0 or later.

    \n
  • \n
  • \n

    Windows platform version 1.0.0 or later.

    \n
  • \n
\n
" } }, "runtimePlatform": { "target": "com.amazonaws.ecs#RuntimePlatform", "traits": { - "smithy.api#documentation": "

The operating system that your tasks definitions run on. A platform family is specified only for\n\t\t\ttasks using the Fargate launch type.

" + "smithy.api#documentation": "

The operating system that your tasks definitions run on. A platform family is\n\t\t\tspecified only for tasks using the Fargate launch type.

" + } + }, + "enableFaultInjection": { + "target": "com.amazonaws.ecs#BoxedBoolean", + "traits": { + "smithy.api#documentation": "

Enables fault injection when you register your task definition and allows for fault injection requests \n\t\t\tto be accepted from the task's containers. The default value is false.

" } } }, @@ -9667,7 +9673,7 @@ "credentialsParameter": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the secret containing the private repository credentials.

\n \n

When you use the Amazon ECS API, CLI, or Amazon Web Services SDK, if the secret exists in the same Region as the\n\t\t\t\ttask that you're launching then you can use either the full ARN or the name of the secret. When\n\t\t\t\tyou use the Amazon Web Services Management Console, you must specify the full ARN of the secret.

\n
", + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the secret containing the private repository\n\t\t\tcredentials.

\n \n

When you use the Amazon ECS API, CLI, or Amazon Web Services SDK, if the secret exists in the same\n\t\t\t\tRegion as the task that you're launching then you can use either the full ARN or\n\t\t\t\tthe name of the secret. When you use the Amazon Web Services Management Console, you must specify the full ARN\n\t\t\t\tof the secret.

\n
", "smithy.api#required": {} } } @@ -9688,40 +9694,40 @@ "name": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

The name of the resource, such as CPU, MEMORY, PORTS,\n\t\t\t\tPORTS_UDP, or a user-defined resource.

" + "smithy.api#documentation": "

The name of the resource, such as CPU, MEMORY,\n\t\t\t\tPORTS, PORTS_UDP, or a user-defined resource.

" } }, "type": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

The type of the resource. Valid values: INTEGER, DOUBLE, LONG,\n\t\t\tor STRINGSET.

" + "smithy.api#documentation": "

The type of the resource. Valid values: INTEGER, DOUBLE,\n\t\t\t\tLONG, or STRINGSET.

" } }, "doubleValue": { "target": "com.amazonaws.ecs#Double", "traits": { "smithy.api#default": 0, - "smithy.api#documentation": "

When the doubleValue type is set, the value of the resource must be a double precision\n\t\t\tfloating-point type.

" + "smithy.api#documentation": "

When the doubleValue type is set, the value of the resource must be a\n\t\t\tdouble precision floating-point type.

" } }, "longValue": { "target": "com.amazonaws.ecs#Long", "traits": { "smithy.api#default": 0, - "smithy.api#documentation": "

When the longValue type is set, the value of the resource must be an extended precision\n\t\t\tfloating-point type.

" + "smithy.api#documentation": "

When the longValue type is set, the value of the resource must be an\n\t\t\textended precision floating-point type.

" } }, "integerValue": { "target": "com.amazonaws.ecs#Integer", "traits": { "smithy.api#default": 0, - "smithy.api#documentation": "

When the integerValue type is set, the value of the resource must be an integer.

" + "smithy.api#documentation": "

When the integerValue type is set, the value of the resource must be an\n\t\t\tinteger.

" } }, "stringSetValue": { "target": "com.amazonaws.ecs#StringList", "traits": { - "smithy.api#documentation": "

When the stringSetValue type is set, the value of the resource must be a string\n\t\t\ttype.

" + "smithy.api#documentation": "

When the stringSetValue type is set, the value of the resource must be a\n\t\t\tstring type.

" } } }, @@ -9771,7 +9777,7 @@ "value": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

The value for the specified resource type.

\n

When the type is GPU, the value is the number of physical GPUs the Amazon ECS\n\t\t\tcontainer agent reserves for the container. The number of GPUs that's reserved for all containers in a\n\t\t\ttask can't exceed the number of available GPUs on the container instance that the task is launched\n\t\t\ton.

\n

When the type is InferenceAccelerator, the value matches the\n\t\t\t\tdeviceName for an InferenceAccelerator\n\t\t\tspecified in a task definition.

", + "smithy.api#documentation": "

The value for the specified resource type.

\n

When the type is GPU, the value is the number of physical\n\t\t\t\tGPUs the Amazon ECS container agent reserves for the container. The number\n\t\t\tof GPUs that's reserved for all containers in a task can't exceed the number of\n\t\t\tavailable GPUs on the container instance that the task is launched on.

\n

When the type is InferenceAccelerator, the value matches the\n\t\t\t\tdeviceName for an InferenceAccelerator specified in a task definition.

", "smithy.api#required": {} } }, @@ -9784,7 +9790,7 @@ } }, "traits": { - "smithy.api#documentation": "

The type and amount of a resource to assign to a container. The supported resource types are GPUs and\n\t\t\tElastic Inference accelerators. For more information, see Working with GPUs on Amazon ECS or Working with Amazon\n\t\t\t\tElastic Inference on Amazon ECS in the Amazon Elastic Container Service Developer Guide\n

" + "smithy.api#documentation": "

The type and amount of a resource to assign to a container. The supported resource\n\t\t\ttypes are GPUs and Elastic Inference accelerators. For more information, see Working with\n\t\t\t\tGPUs on Amazon ECS or Working with Amazon Elastic\n\t\t\t\tInference on Amazon ECS in the Amazon Elastic Container Service Developer Guide\n

" } }, "com.amazonaws.ecs#ResourceRequirements": { @@ -9822,19 +9828,19 @@ "reason": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

The reason the rollback happened. For example, the circuit breaker initiated the rollback operation.

" + "smithy.api#documentation": "

The reason the rollback happened. For example, the circuit breaker initiated the\n\t\t\trollback operation.

" } }, "startedAt": { "target": "com.amazonaws.ecs#Timestamp", "traits": { - "smithy.api#documentation": "

Time time that the rollback started. The format is yyyy-MM-dd HH:mm:ss.SSSSSS.

" + "smithy.api#documentation": "

The time that the rollback started. The format is yyyy-MM-dd HH:mm:ss.SSSSSS.

" } }, "serviceRevisionArn": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

The ARN of the service revision deployed as part of the rollback.

\n

When the type is GPU, the value is the number of physical\n\t\t\tGPUs the Amazon ECS container agent reserves for the container. The number\n\t\t\tof GPUs that's reserved for all containers in a task can't exceed the number of\n\t\t\tavailable GPUs on the container instance that the task is launched on.

\n

When the type is InferenceAccelerator, the value matches the\n\t\t\tdeviceName for an InferenceAccelerator specified in a task definition.

" + "smithy.api#documentation": "

The ARN of the service revision deployed as part of the rollback.

\n

When the type is GPU, the value is the number of physical\n\t\t\t\tGPUs the Amazon ECS container agent reserves for the container. The number\n\t\t\tof GPUs that's reserved for all containers in a task can't exceed the number of\n\t\t\tavailable GPUs on the container instance that the task is launched on.

\n

When the type is InferenceAccelerator, the value matches the\n\t\t\t\tdeviceName for an InferenceAccelerator specified in a task definition.

" } } }, @@ -9883,7 +9889,7 @@ } ], "traits": { - "smithy.api#documentation": "

Starts a new task using the specified task definition.

\n \n

On March 21, 2024, a change was made to resolve the task definition revision before authorization. When a task definition revision is not specified, authorization will occur using the latest revision of a task definition.

\n
\n \n

Amazon Elastic Inference (EI) is no longer available to customers.

\n
\n

You can allow Amazon ECS to place tasks for you, or you can customize how Amazon ECS places tasks using\n\t\t\tplacement constraints and placement strategies. For more information, see Scheduling\n\t\t\t\tTasks in the Amazon Elastic Container Service Developer Guide.

\n

Alternatively, you can use StartTask to use your own scheduler or place tasks manually\n\t\t\ton specific container instances.

\n

You can attach Amazon EBS volumes to Amazon ECS tasks by configuring the volume when creating or updating a\n\t\t\tservice. For more infomation, see Amazon EBS\n\t\t\t\tvolumes in the Amazon Elastic Container Service Developer Guide.

\n

The Amazon ECS API follows an eventual consistency model. This is because of the distributed nature of the\n\t\t\tsystem supporting the API. This means that the result of an API command you run that affects your Amazon ECS\n\t\t\tresources might not be immediately visible to all subsequent commands you run. Keep this in mind when\n\t\t\tyou carry out an API command that immediately follows a previous API command.

\n

To manage eventual consistency, you can do the following:

\n
    \n
  • \n

    Confirm the state of the resource before you run a command to modify it. Run the\n\t\t\t\t\tDescribeTasks command using an exponential backoff algorithm to ensure that you allow enough\n\t\t\t\t\ttime for the previous command to propagate through the system. To do this, run the\n\t\t\t\t\tDescribeTasks command repeatedly, starting with a couple of seconds of wait time and increasing\n\t\t\t\t\tgradually up to five minutes of wait time.

    \n
  • \n
  • \n

    Add wait time between subsequent commands, even if the DescribeTasks command returns an\n\t\t\t\t\taccurate response. Apply an exponential backoff algorithm starting with a couple of seconds of\n\t\t\t\t\twait time, and increase gradually up to about five minutes of wait time.

    \n
  • \n
", + "smithy.api#documentation": "

Starts a new task using the specified task definition.

\n \n

On March 21, 2024, a change was made to resolve the task definition revision before authorization. When a task definition revision is not specified, authorization will occur using the latest revision of a task definition.

\n
\n \n

Amazon Elastic Inference (EI) is no longer available to customers.

\n
\n

You can allow Amazon ECS to place tasks for you, or you can customize how Amazon ECS places\n\t\t\ttasks using placement constraints and placement strategies. For more information, see\n\t\t\t\tScheduling Tasks in the Amazon Elastic Container Service Developer Guide.

\n

Alternatively, you can use StartTask to use your own scheduler or place\n\t\t\ttasks manually on specific container instances.

\n

You can attach Amazon EBS volumes to Amazon ECS tasks by configuring the volume when creating or\n\t\t\tupdating a service. For more information, see Amazon EBS volumes in the Amazon Elastic Container Service Developer Guide.

\n

The Amazon ECS API follows an eventual consistency model. This is because of the\n\t\t\tdistributed nature of the system supporting the API. This means that the result of an\n\t\t\tAPI command you run that affects your Amazon ECS resources might not be immediately visible\n\t\t\tto all subsequent commands you run. Keep this in mind when you carry out an API command\n\t\t\tthat immediately follows a previous API command.

\n

To manage eventual consistency, you can do the following:

\n
    \n
  • \n

    Confirm the state of the resource before you run a command to modify it. Run\n\t\t\t\t\tthe DescribeTasks command using an exponential backoff algorithm to ensure that\n\t\t\t\t\tyou allow enough time for the previous command to propagate through the system.\n\t\t\t\t\tTo do this, run the DescribeTasks command repeatedly, starting with a couple of\n\t\t\t\t\tseconds of wait time and increasing gradually up to five minutes of wait\n\t\t\t\t\ttime.

    \n
  • \n
  • \n

    Add wait time between subsequent commands, even if the DescribeTasks command\n\t\t\t\t\treturns an accurate response. Apply an exponential backoff algorithm starting\n\t\t\t\t\twith a couple of seconds of wait time, and increase gradually up to about five\n\t\t\t\t\tminutes of wait time.

    \n
  • \n
", "smithy.api#examples": [ { "title": "To run a task on your default cluster", @@ -9928,81 +9934,81 @@ "capacityProviderStrategy": { "target": "com.amazonaws.ecs#CapacityProviderStrategy", "traits": { - "smithy.api#documentation": "

The capacity provider strategy to use for the task.

\n

If a capacityProviderStrategy is specified, the launchType parameter must\n\t\t\tbe omitted. If no capacityProviderStrategy or launchType is specified, the\n\t\t\t\tdefaultCapacityProviderStrategy for the cluster is used.

\n

When you use cluster auto scaling, you must specify capacityProviderStrategy and not\n\t\t\t\tlaunchType.

\n

A capacity provider strategy may contain a maximum of 6 capacity providers.

" + "smithy.api#documentation": "

The capacity provider strategy to use for the task.

\n

If a capacityProviderStrategy is specified, the launchType\n\t\t\tparameter must be omitted. If no capacityProviderStrategy or\n\t\t\t\tlaunchType is specified, the\n\t\t\t\tdefaultCapacityProviderStrategy for the cluster is used.

\n

When you use cluster auto scaling, you must specify\n\t\t\t\tcapacityProviderStrategy and not launchType.

\n

A capacity provider strategy can contain a maximum of 20 capacity providers.

" } }, "cluster": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

The short name or full Amazon Resource Name (ARN) of the cluster to run your task on. If you do not specify a cluster, the default cluster is assumed.

" + "smithy.api#documentation": "

The short name or full Amazon Resource Name (ARN) of the cluster to run your task on.\n\t\t\tIf you do not specify a cluster, the default cluster is assumed.

" } }, "count": { "target": "com.amazonaws.ecs#BoxedInteger", "traits": { - "smithy.api#documentation": "

The number of instantiations of the specified task to place on your cluster. You can specify up to 10\n\t\t\ttasks for each call.

" + "smithy.api#documentation": "

The number of instantiations of the specified task to place on your cluster. You can\n\t\t\tspecify up to 10 tasks for each call.

" } }, "enableECSManagedTags": { "target": "com.amazonaws.ecs#Boolean", "traits": { "smithy.api#default": false, - "smithy.api#documentation": "

Specifies whether to use Amazon ECS managed tags for the task. For more information, see Tagging Your Amazon ECS\n\t\t\t\tResources in the Amazon Elastic Container Service Developer Guide.

" + "smithy.api#documentation": "

Specifies whether to use Amazon ECS managed tags for the task. For more information, see\n\t\t\t\tTagging Your Amazon ECS\n\t\t\t\tResources in the Amazon Elastic Container Service Developer Guide.

" } }, "enableExecuteCommand": { "target": "com.amazonaws.ecs#Boolean", "traits": { "smithy.api#default": false, - "smithy.api#documentation": "

Determines whether to use the execute command functionality for the containers in this task. If\n\t\t\t\ttrue, this enables execute command functionality on all containers in the task.

\n

If true, then the task definition must have a task role, or you must provide one as an\n\t\t\toverride.

" + "smithy.api#documentation": "

Determines whether to use the execute command functionality for the containers in this\n\t\t\ttask. If true, this enables execute command functionality on all containers\n\t\t\tin the task.

\n

If true, then the task definition must have a task role, or you must\n\t\t\tprovide one as an override.

" } }, "group": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

The name of the task group to associate with the task. The default value is the family name of the\n\t\t\ttask definition (for example, family:my-family-name).

" + "smithy.api#documentation": "

The name of the task group to associate with the task. The default value is the family\n\t\t\tname of the task definition (for example, family:my-family-name).

" } }, "launchType": { "target": "com.amazonaws.ecs#LaunchType", "traits": { - "smithy.api#documentation": "

The infrastructure to run your standalone task on. For more information, see Amazon ECS launch\n\t\t\t\ttypes in the Amazon Elastic Container Service Developer Guide.

\n

The FARGATE launch type runs your tasks on Fargate On-Demand infrastructure.

\n \n

Fargate Spot infrastructure is available for use but a capacity provider strategy must be used.\n\t\t\t\tFor more information, see Fargate capacity providers in the Amazon ECS Developer\n\t\t\t\tGuide.

\n
\n

The EC2 launch type runs your tasks on Amazon EC2 instances registered to your\n\t\t\tcluster.

\n

The EXTERNAL launch type runs your tasks on your on-premises server or virtual machine\n\t\t\t(VM) capacity registered to your cluster.

\n

A task can use either a launch type or a capacity provider strategy. If a launchType is\n\t\t\tspecified, the capacityProviderStrategy parameter must be omitted.

\n

When you use cluster auto scaling, you must specify capacityProviderStrategy and not\n\t\t\t\tlaunchType.

" + "smithy.api#documentation": "

The infrastructure to run your standalone task on. For more information, see Amazon ECS\n\t\t\t\tlaunch types in the Amazon Elastic Container Service Developer Guide.

\n

The FARGATE launch type runs your tasks on Fargate On-Demand\n\t\t\tinfrastructure.

\n \n

Fargate Spot infrastructure is available for use but a capacity provider\n\t\t\t\tstrategy must be used. For more information, see Fargate capacity providers in the\n\t\t\t\t\tAmazon ECS Developer Guide.

\n
\n

The EC2 launch type runs your tasks on Amazon EC2 instances registered to your\n\t\t\tcluster.

\n

The EXTERNAL launch type runs your tasks on your on-premises server or\n\t\t\tvirtual machine (VM) capacity registered to your cluster.

\n

A task can use either a launch type or a capacity provider strategy. If a\n\t\t\t\tlaunchType is specified, the capacityProviderStrategy\n\t\t\tparameter must be omitted.

\n

When you use cluster auto scaling, you must specify\n\t\t\t\tcapacityProviderStrategy and not launchType.

" } }, "networkConfiguration": { "target": "com.amazonaws.ecs#NetworkConfiguration", "traits": { - "smithy.api#documentation": "

The network configuration for the task. This parameter is required for task definitions that use the\n\t\t\t\tawsvpc network mode to receive their own elastic network interface, and it isn't\n\t\t\tsupported for other network modes. For more information, see Task networking in the\n\t\t\tAmazon Elastic Container Service Developer Guide.

" + "smithy.api#documentation": "

The network configuration for the task. This parameter is required for task\n\t\t\tdefinitions that use the awsvpc network mode to receive their own elastic\n\t\t\tnetwork interface, and it isn't supported for other network modes. For more information,\n\t\t\tsee Task networking\n\t\t\tin the Amazon Elastic Container Service Developer Guide.

" } }, "overrides": { "target": "com.amazonaws.ecs#TaskOverride", "traits": { - "smithy.api#documentation": "

A list of container overrides in JSON format that specify the name of a container in the specified\n\t\t\ttask definition and the overrides it should receive. You can override the default command for a\n\t\t\tcontainer (that's specified in the task definition or Docker image) with a command\n\t\t\toverride. You can also override existing environment variables (that are specified in the task\n\t\t\tdefinition or Docker image) on a container or add new environment variables to it with an\n\t\t\t\tenvironment override.

\n

A total of 8192 characters are allowed for overrides. This limit includes the JSON formatting\n\t\t\tcharacters of the override structure.

" + "smithy.api#documentation": "

A list of container overrides in JSON format that specify the name of a container in\n\t\t\tthe specified task definition and the overrides it should receive. You can override the\n\t\t\tdefault command for a container (that's specified in the task definition or Docker\n\t\t\timage) with a command override. You can also override existing environment\n\t\t\tvariables (that are specified in the task definition or Docker image) on a container or\n\t\t\tadd new environment variables to it with an environment override.

\n

A total of 8192 characters are allowed for overrides. This limit includes the JSON\n\t\t\tformatting characters of the override structure.

" } }, "placementConstraints": { "target": "com.amazonaws.ecs#PlacementConstraints", "traits": { - "smithy.api#documentation": "

An array of placement constraint objects to use for the task. You can specify up to 10 constraints\n\t\t\tfor each task (including constraints in the task definition and those specified at runtime).

" + "smithy.api#documentation": "

An array of placement constraint objects to use for the task. You can specify up to 10\n\t\t\tconstraints for each task (including constraints in the task definition and those\n\t\t\tspecified at runtime).

" } }, "placementStrategy": { "target": "com.amazonaws.ecs#PlacementStrategies", "traits": { - "smithy.api#documentation": "

The placement strategy objects to use for the task. You can specify a maximum of 5 strategy rules for\n\t\t\teach task.

" + "smithy.api#documentation": "

The placement strategy objects to use for the task. You can specify a maximum of 5\n\t\t\tstrategy rules for each task.

" } }, "platformVersion": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

The platform version the task uses. A platform version is only specified for tasks hosted on\n\t\t\tFargate. If one isn't specified, the LATEST platform version is used. For\n\t\t\tmore information, see Fargate platform versions in\n\t\t\tthe Amazon Elastic Container Service Developer Guide.

" + "smithy.api#documentation": "

The platform version the task uses. A platform version is only specified for tasks\n\t\t\thosted on Fargate. If one isn't specified, the LATEST\n\t\t\tplatform version is used. For more information, see Fargate platform\n\t\t\t\tversions in the Amazon Elastic Container Service Developer Guide.

" } }, "propagateTags": { "target": "com.amazonaws.ecs#PropagateTags", "traits": { - "smithy.api#documentation": "

Specifies whether to propagate the tags from the task definition to the task. If no value is\n\t\t\tspecified, the tags aren't propagated. Tags can only be propagated to the task during task creation. To\n\t\t\tadd tags to a task after task creation, use theTagResource API action.

\n \n

An error will be received if you specify the SERVICE option when running a\n\t\t\t\ttask.

\n
" + "smithy.api#documentation": "

Specifies whether to propagate the tags from the task definition to the task. If no\n\t\t\tvalue is specified, the tags aren't propagated. Tags can only be propagated to the task\n\t\t\tduring task creation. To add tags to a task after task creation, use the TagResource API action.

\n \n

An error will be received if you specify the SERVICE option when\n\t\t\t\trunning a task.

\n
" } }, "referenceId": { @@ -10014,33 +10020,33 @@ "startedBy": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

An optional tag specified when a task is started. For example, if you automatically trigger a task to\n\t\t\trun a batch process job, you could apply a unique identifier for that job to your task with the\n\t\t\t\tstartedBy parameter. You can then identify which tasks belong to that job by filtering\n\t\t\tthe results of a ListTasks call with the startedBy value. Up to 128 letters (uppercase and\n\t\t\tlowercase), numbers, hyphens (-), forward slash (/), and underscores (_) are allowed.

\n

If a task is started by an Amazon ECS service, then the startedBy parameter contains the\n\t\t\tdeployment ID of the service that starts it.

" + "smithy.api#documentation": "

An optional tag specified when a task is started. For example, if you automatically\n\t\t\ttrigger a task to run a batch process job, you could apply a unique identifier for that\n\t\t\tjob to your task with the startedBy parameter. You can then identify which\n\t\t\ttasks belong to that job by filtering the results of a ListTasks call with\n\t\t\tthe startedBy value. Up to 128 letters (uppercase and lowercase), numbers,\n\t\t\thyphens (-), forward slash (/), and underscores (_) are allowed.

\n

If a task is started by an Amazon ECS service, then the startedBy parameter\n\t\t\tcontains the deployment ID of the service that starts it.

" } }, "tags": { "target": "com.amazonaws.ecs#Tags", "traits": { - "smithy.api#documentation": "

The metadata that you apply to the task to help you categorize and organize them. Each tag consists\n\t\t\tof a key and an optional value, both of which you define.

\n

The following basic restrictions apply to tags:

\n
    \n
  • \n

    Maximum number of tags per resource - 50

    \n
  • \n
  • \n

    For each resource, each tag key must be unique, and each tag key can have only\n one value.

    \n
  • \n
  • \n

    Maximum key length - 128 Unicode characters in UTF-8

    \n
  • \n
  • \n

    Maximum value length - 256 Unicode characters in UTF-8

    \n
  • \n
  • \n

    If your tagging schema is used across multiple services and resources,\n remember that other services may have restrictions on allowed characters.\n Generally allowed characters are: letters, numbers, and spaces representable in\n UTF-8, and the following characters: + - = . _ : / @.

    \n
  • \n
  • \n

    Tag keys and values are case-sensitive.

    \n
  • \n
  • \n

    Do not use aws:, AWS:, or any upper or lowercase\n combination of such as a prefix for either keys or values as it is reserved for\n Amazon Web Services use. You cannot edit or delete tag keys or values with this prefix. Tags with\n this prefix do not count against your tags per resource limit.

    \n
  • \n
" + "smithy.api#documentation": "

The metadata that you apply to the task to help you categorize and organize them. Each\n\t\t\ttag consists of a key and an optional value, both of which you define.

\n

The following basic restrictions apply to tags:

\n
    \n
  • \n

    Maximum number of tags per resource - 50

    \n
  • \n
  • \n

    For each resource, each tag key must be unique, and each tag key can have only\n one value.

    \n
  • \n
  • \n

    Maximum key length - 128 Unicode characters in UTF-8

    \n
  • \n
  • \n

    Maximum value length - 256 Unicode characters in UTF-8

    \n
  • \n
  • \n

    If your tagging schema is used across multiple services and resources,\n remember that other services may have restrictions on allowed characters.\n Generally allowed characters are: letters, numbers, and spaces representable in\n UTF-8, and the following characters: + - = . _ : / @.

    \n
  • \n
  • \n

    Tag keys and values are case-sensitive.

    \n
  • \n
  • \n

    Do not use aws:, AWS:, or any upper or lowercase\n combination of such as a prefix for either keys or values as it is reserved for\n Amazon Web Services use. You cannot edit or delete tag keys or values with this prefix. Tags with\n this prefix do not count against your tags per resource limit.

    \n
  • \n
" } }, "taskDefinition": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

The family and revision (family:revision) or full ARN of the\n\t\t\ttask definition to run. If a revision isn't specified, the latest ACTIVE\n\t\t\trevision is used.

\n

The full ARN value must match the value that you specified as the Resource of the\n\t\t\tprincipal's permissions policy.

\n

When you specify a task definition, you must either specify a specific revision, or all revisions in\n\t\t\tthe ARN.

\n

To specify a specific revision, include the revision number in the ARN. For example, to specify\n\t\t\trevision 2, use\n\t\t\tarn:aws:ecs:us-east-1:111122223333:task-definition/TaskFamilyName:2.

\n

To specify all revisions, use the wildcard (*) in the ARN. For example, to specify all revisions,\n\t\t\tuse arn:aws:ecs:us-east-1:111122223333:task-definition/TaskFamilyName:*.

\n

For more information, see Policy Resources for Amazon ECS in the Amazon Elastic Container Service Developer Guide.

", + "smithy.api#documentation": "

The family and revision (family:revision) or\n\t\t\tfull ARN of the task definition to run. If a revision isn't specified,\n\t\t\tthe latest ACTIVE revision is used.

\n

The full ARN value must match the value that you specified as the\n\t\t\t\tResource of the principal's permissions policy.

\n

When you specify a task definition, you must either specify a specific revision, or\n\t\t\tall revisions in the ARN.

\n

To specify a specific revision, include the revision number in the ARN. For example,\n\t\t\tto specify revision 2, use\n\t\t\t\tarn:aws:ecs:us-east-1:111122223333:task-definition/TaskFamilyName:2.

\n

To specify all revisions, use the wildcard (*) in the ARN. For example, to specify\n\t\t\tall revisions, use\n\t\t\t\tarn:aws:ecs:us-east-1:111122223333:task-definition/TaskFamilyName:*.

\n

For more information, see Policy Resources for Amazon ECS in the Amazon Elastic Container Service Developer Guide.

", "smithy.api#required": {} } }, "clientToken": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

An identifier that you provide to ensure the idempotency of the request. It must be unique and is\n\t\t\tcase sensitive. Up to 64 characters are allowed. The valid characters are characters in the range of\n\t\t\t33-126, inclusive. For more information, see Ensuring\n\t\t\t\tidempotency.

", + "smithy.api#documentation": "

An identifier that you provide to ensure the idempotency of the request. It must be\n\t\t\tunique and is case sensitive. Up to 64 characters are allowed. The valid characters are\n\t\t\tcharacters in the range of 33-126, inclusive. For more information, see Ensuring idempotency.

", "smithy.api#idempotencyToken": {} } }, "volumeConfigurations": { "target": "com.amazonaws.ecs#TaskVolumeConfigurations", "traits": { - "smithy.api#documentation": "

The details of the volume that was configuredAtLaunch. You can configure the size,\n\t\t\tvolumeType, IOPS, throughput, snapshot and encryption in in TaskManagedEBSVolumeConfiguration. The name of the volume must match the\n\t\t\t\tname from the task definition.

" + "smithy.api#documentation": "

The details of the volume that was configuredAtLaunch. You can configure\n\t\t\tthe size, volumeType, IOPS, throughput, snapshot and encryption in TaskManagedEBSVolumeConfiguration. The name of the volume must\n\t\t\tmatch the name from the task definition.

" } } }, @@ -10054,13 +10060,13 @@ "tasks": { "target": "com.amazonaws.ecs#Tasks", "traits": { - "smithy.api#documentation": "

A full description of the tasks that were run. The tasks that were successfully placed on your\n\t\t\tcluster are described here.

" + "smithy.api#documentation": "

A full description of the tasks that were run. The tasks that were successfully placed\n\t\t\ton your cluster are described here.

" } }, "failures": { "target": "com.amazonaws.ecs#Failures", "traits": { - "smithy.api#documentation": "

Any failures associated with the call.

\n

For information about how to address failures, see Service event messages and API failure reasons in the\n\t\t\tAmazon Elastic Container Service Developer Guide.

" + "smithy.api#documentation": "

Any failures associated with the call.

\n

For information about how to address failures, see Service event messages and API failure\n\t\t\t\treasons in the Amazon Elastic Container Service Developer Guide.

" } } }, @@ -10074,7 +10080,7 @@ "cpuArchitecture": { "target": "com.amazonaws.ecs#CPUArchitecture", "traits": { - "smithy.api#documentation": "

The CPU architecture.

\n

You can run your Linux tasks on an ARM-based platform by setting the value to ARM64.\n\t\t\tThis option is available for tasks that run on Linux Amazon EC2 instance or Linux containers on\n\t\t\tFargate.

" + "smithy.api#documentation": "

The CPU architecture.

\n

You can run your Linux tasks on an ARM-based platform by setting the value to\n\t\t\t\tARM64. This option is available for tasks that run on Linux Amazon EC2\n\t\t\tinstance or Linux containers on Fargate.

" } }, "operatingSystemFamily": { @@ -10095,7 +10101,7 @@ "target": "com.amazonaws.ecs#Double", "traits": { "smithy.api#default": 0, - "smithy.api#documentation": "

The value, specified as a percent total of a service's desiredCount, to scale the task\n\t\t\tset. Accepted values are numbers between 0 and 100.

" + "smithy.api#documentation": "

The value, specified as a percent total of a service's desiredCount, to\n\t\t\tscale the task set. Accepted values are numbers between 0 and 100.

" } }, "unit": { @@ -10106,7 +10112,7 @@ } }, "traits": { - "smithy.api#documentation": "

A floating-point percentage of the desired number of tasks to place and keep running in the task\n\t\t\tset.

" + "smithy.api#documentation": "

A floating-point percentage of the desired number of tasks to place and keep running\n\t\t\tin the task set.

" } }, "com.amazonaws.ecs#ScaleUnit": { @@ -10167,13 +10173,13 @@ "valueFrom": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

The secret to expose to the container. The supported values are either the full ARN of the Secrets Manager secret or the full ARN of the parameter in the SSM Parameter Store.

\n

For information about the require Identity and Access Management permissions, see Required\n\t\t\t\tIAM permissions for Amazon ECS secrets (for Secrets Manager) or Required IAM\n\t\t\t\tpermissions for Amazon ECS secrets (for Systems Manager Parameter store) in the\n\t\t\tAmazon Elastic Container Service Developer Guide.

\n \n

If the SSM Parameter Store parameter exists in the same Region as the task you're launching,\n\t\t\t\tthen you can use either the full ARN or name of the parameter. If the parameter exists in a\n\t\t\t\tdifferent Region, then the full ARN must be specified.

\n
", + "smithy.api#documentation": "

The secret to expose to the container. The supported values are either the full ARN\n\t\t\tof the Secrets Manager secret or the full ARN of the parameter in the SSM\n\t\t\tParameter Store.

\n

For information about the required Identity and Access Management permissions, see Required IAM permissions for Amazon ECS secrets (for Secrets Manager) or\n\t\t\t\tRequired IAM permissions for Amazon ECS secrets (for Systems Manager Parameter\n\t\t\tstore) in the Amazon Elastic Container Service Developer Guide.

\n \n

If the SSM Parameter Store parameter exists in the same Region as the task\n\t\t\t\tyou're launching, then you can use either the full ARN or name of the parameter.\n\t\t\t\tIf the parameter exists in a different Region, then the full ARN must be\n\t\t\t\tspecified.

\n
", "smithy.api#required": {} } } }, "traits": { - "smithy.api#documentation": "

An object representing the secret to expose to your container. Secrets can be exposed to a container\n\t\t\tin the following ways:

\n
    \n
  • \n

    To inject sensitive data into your containers as environment variables, use the\n\t\t\t\t\t\tsecrets container definition parameter.

    \n
  • \n
  • \n

    To reference sensitive information in the log configuration of a container, use the\n\t\t\t\t\t\tsecretOptions container definition parameter.

    \n
  • \n
\n

For more information, see Specifying sensitive\n\t\t\t\tdata in the Amazon Elastic Container Service Developer Guide.

" + "smithy.api#documentation": "

An object representing the secret to expose to your container. Secrets can be exposed\n\t\t\tto a container in the following ways:

\n
    \n
  • \n

    To inject sensitive data into your containers as environment variables, use\n\t\t\t\t\tthe secrets container definition parameter.

    \n
  • \n
  • \n

    To reference sensitive information in the log configuration of a container,\n\t\t\t\t\tuse the secretOptions container definition parameter.

    \n
  • \n
\n

For more information, see Specifying\n\t\t\t\tsensitive data in the Amazon Elastic Container Service Developer Guide.

" } }, "com.amazonaws.ecs#SecretList": { @@ -10209,13 +10215,13 @@ "serviceArn": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

The ARN that identifies the service. For more information about the ARN format, see Amazon Resource Name (ARN)\n\t\t\tin the Amazon ECS Developer Guide.

" + "smithy.api#documentation": "

The ARN that identifies the service. For more information about the ARN format,\n\t\t\tsee Amazon Resource Name (ARN) in the Amazon ECS Developer Guide.

" } }, "serviceName": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

The name of your service. Up to 255 letters (uppercase and lowercase), numbers, underscores, and hyphens are allowed. Service names must be unique within a cluster.\n\t\t\tHowever, you can have similarly named services in multiple clusters within a Region or across multiple\n\t\t\tRegions.

" + "smithy.api#documentation": "

The name of your service. Up to 255 letters (uppercase and lowercase), numbers, underscores, and hyphens are allowed. Service names must be unique within\n\t\t\ta cluster. However, you can have similarly named services in multiple clusters within a\n\t\t\tRegion or across multiple Regions.

" } }, "clusterArn": { @@ -10227,26 +10233,26 @@ "loadBalancers": { "target": "com.amazonaws.ecs#LoadBalancers", "traits": { - "smithy.api#documentation": "

A list of Elastic Load Balancing load balancer objects. It contains the load balancer name, the container name, and\n\t\t\tthe container port to access from the load balancer. The container name is as it appears in a container\n\t\t\tdefinition.

" + "smithy.api#documentation": "

A list of Elastic Load Balancing load balancer objects. It contains the load balancer name, the\n\t\t\tcontainer name, and the container port to access from the load balancer. The container\n\t\t\tname is as it appears in a container definition.

" } }, "serviceRegistries": { "target": "com.amazonaws.ecs#ServiceRegistries", "traits": { - "smithy.api#documentation": "

The details for the service discovery registries to assign to this service. For more information, see\n\t\t\t\tService\n\t\t\t\tDiscovery.

" + "smithy.api#documentation": "

The details for the service discovery registries to assign to this service. For more\n\t\t\tinformation, see Service\n\t\t\t\tDiscovery.

" } }, "status": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

The status of the service. The valid values are ACTIVE, DRAINING, or\n\t\t\t\tINACTIVE.

" + "smithy.api#documentation": "

The status of the service. The valid values are ACTIVE,\n\t\t\t\tDRAINING, or INACTIVE.

" } }, "desiredCount": { "target": "com.amazonaws.ecs#Integer", "traits": { "smithy.api#default": 0, - "smithy.api#documentation": "

The desired number of instantiations of the task definition to keep running on the service. This\n\t\t\tvalue is specified when the service is created with CreateService , and it can be\n\t\t\tmodified with UpdateService.

" + "smithy.api#documentation": "

The desired number of instantiations of the task definition to keep running on the\n\t\t\tservice. This value is specified when the service is created with CreateService , and it can be modified with UpdateService.

" } }, "runningCount": { @@ -10266,43 +10272,43 @@ "launchType": { "target": "com.amazonaws.ecs#LaunchType", "traits": { - "smithy.api#documentation": "

The launch type the service is using. When using the DescribeServices API, this field is omitted if\n\t\t\tthe service was created using a capacity provider strategy.

" + "smithy.api#documentation": "

The launch type the service is using. When using the DescribeServices API, this field\n\t\t\tis omitted if the service was created using a capacity provider strategy.

" } }, "capacityProviderStrategy": { "target": "com.amazonaws.ecs#CapacityProviderStrategy", "traits": { - "smithy.api#documentation": "

The capacity provider strategy the service uses. When using the DescribeServices API, this field is\n\t\t\tomitted if the service was created using a launch type.

" + "smithy.api#documentation": "

The capacity provider strategy the service uses. When using the DescribeServices API,\n\t\t\tthis field is omitted if the service was created using a launch type.

" } }, "platformVersion": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

The platform version to run your service on. A platform version is only specified for tasks that are\n\t\t\thosted on Fargate. If one isn't specified, the LATEST platform version is used. For more\n\t\t\tinformation, see Fargate Platform Versions in\n\t\t\tthe Amazon Elastic Container Service Developer Guide.

" + "smithy.api#documentation": "

The platform version to run your service on. A platform version is only specified for\n\t\t\ttasks that are hosted on Fargate. If one isn't specified, the LATEST\n\t\t\tplatform version is used. For more information, see Fargate Platform\n\t\t\t\tVersions in the Amazon Elastic Container Service Developer Guide.

" } }, "platformFamily": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

The operating system that your tasks in the service run on. A platform family is specified only for\n\t\t\ttasks using the Fargate launch type.

\n

All tasks that run as part of this service must use the same platformFamily value as\n\t\t\tthe service (for example, LINUX).

" + "smithy.api#documentation": "

The operating system that your tasks in the service run on. A platform family is\n\t\t\tspecified only for tasks using the Fargate launch type.

\n

All tasks that run as part of this service must use the same\n\t\t\t\tplatformFamily value as the service (for example,\n\t\t\tLINUX).

" } }, "taskDefinition": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

The task definition to use for tasks in the service. This value is specified when the service is\n\t\t\tcreated with CreateService, and it can be modified with UpdateService.

" + "smithy.api#documentation": "

The task definition to use for tasks in the service. This value is specified when the\n\t\t\tservice is created with CreateService,\n\t\t\tand it can be modified with UpdateService.

" } }, "deploymentConfiguration": { "target": "com.amazonaws.ecs#DeploymentConfiguration", "traits": { - "smithy.api#documentation": "

Optional deployment parameters that control how many tasks run during the deployment and the ordering\n\t\t\tof stopping and starting tasks.

" + "smithy.api#documentation": "

Optional deployment parameters that control how many tasks run during the deployment\n\t\t\tand the ordering of stopping and starting tasks.

" } }, "taskSets": { "target": "com.amazonaws.ecs#TaskSets", "traits": { - "smithy.api#documentation": "

Information about a set of Amazon ECS tasks in either a CodeDeploy or an EXTERNAL deployment. An\n\t\t\tAmazon ECS task set includes details such as the desired number of tasks, how many tasks are running, and\n\t\t\twhether the task set serves production traffic.

" + "smithy.api#documentation": "

Information about a set of Amazon ECS tasks in either a CodeDeploy or an EXTERNAL\n\t\t\tdeployment. An Amazon ECS task set includes details such as the desired number of tasks, how\n\t\t\tmany tasks are running, and whether the task set serves production traffic.

" } }, "deployments": { @@ -10314,13 +10320,13 @@ "roleArn": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

The ARN of the IAM role that's associated with the service. It allows the Amazon ECS container agent\n\t\t\tto register container instances with an Elastic Load Balancing load balancer.

" + "smithy.api#documentation": "

The ARN of the IAM role that's associated with the service. It allows the Amazon ECS\n\t\t\tcontainer agent to register container instances with an Elastic Load Balancing load balancer.

" } }, "events": { "target": "com.amazonaws.ecs#ServiceEvents", "traits": { - "smithy.api#documentation": "

The event stream for your service. A maximum of 100 of the latest events are displayed.

" + "smithy.api#documentation": "

The event stream for your service. A maximum of 100 of the latest events are\n\t\t\tdisplayed.

" } }, "createdAt": { @@ -10350,13 +10356,13 @@ "healthCheckGracePeriodSeconds": { "target": "com.amazonaws.ecs#BoxedInteger", "traits": { - "smithy.api#documentation": "

The period of time, in seconds, that the Amazon ECS service scheduler ignores unhealthy Elastic Load Balancing target\n\t\t\thealth checks after a task has first started.

" + "smithy.api#documentation": "

The period of time, in seconds, that the Amazon ECS service scheduler ignores unhealthy\n\t\t\tElastic Load Balancing target health checks after a task has first started.

" } }, "schedulingStrategy": { "target": "com.amazonaws.ecs#SchedulingStrategy", "traits": { - "smithy.api#documentation": "

The scheduling strategy to use for the service. For more information, see Services.

\n

There are two service scheduler strategies available.

\n
    \n
  • \n

    \n REPLICA-The replica scheduling strategy places and maintains the desired\n\t\t\t\t\tnumber of tasks across your cluster. By default, the service scheduler spreads tasks across\n\t\t\t\t\tAvailability Zones. You can use task placement strategies and constraints to customize task\n\t\t\t\t\tplacement decisions.

    \n
  • \n
  • \n

    \n DAEMON-The daemon scheduling strategy deploys exactly one task on each\n\t\t\t\t\tactive container instance. This task meets all of the task placement constraints that you\n\t\t\t\t\tspecify in your cluster. The service scheduler also evaluates the task placement constraints\n\t\t\t\t\tfor running tasks. It stops tasks that don't meet the placement constraints.

    \n \n

    Fargate tasks don't support the DAEMON scheduling\n\t\t\t\t\t\tstrategy.

    \n
    \n
  • \n
" + "smithy.api#documentation": "

The scheduling strategy to use for the service. For more information, see Services.

\n

There are two service scheduler strategies available.

\n
    \n
  • \n

    \n REPLICA-The replica scheduling strategy places and\n\t\t\t\t\tmaintains the desired number of tasks across your cluster. By default, the\n\t\t\t\t\tservice scheduler spreads tasks across Availability Zones. You can use task\n\t\t\t\t\tplacement strategies and constraints to customize task placement\n\t\t\t\t\tdecisions.

    \n
  • \n
  • \n

    \n DAEMON-The daemon scheduling strategy deploys exactly one\n\t\t\t\t\ttask on each active container instance. This task meets all of the task\n\t\t\t\t\tplacement constraints that you specify in your cluster. The service scheduler\n\t\t\t\t\talso evaluates the task placement constraints for running tasks. It stops tasks\n\t\t\t\t\tthat don't meet the placement constraints.

    \n \n

    Fargate tasks don't support the DAEMON\n\t\t\t\t\t\tscheduling strategy.

    \n
    \n
  • \n
" } }, "deploymentController": { @@ -10368,7 +10374,7 @@ "tags": { "target": "com.amazonaws.ecs#Tags", "traits": { - "smithy.api#documentation": "

The metadata that you apply to the service to help you categorize and organize them. Each tag\n\t\t\tconsists of a key and an optional value. You define both the key and value.

\n

The following basic restrictions apply to tags:

\n
    \n
  • \n

    Maximum number of tags per resource - 50

    \n
  • \n
  • \n

    For each resource, each tag key must be unique, and each tag key can have only\n one value.

    \n
  • \n
  • \n

    Maximum key length - 128 Unicode characters in UTF-8

    \n
  • \n
  • \n

    Maximum value length - 256 Unicode characters in UTF-8

    \n
  • \n
  • \n

    If your tagging schema is used across multiple services and resources,\n remember that other services may have restrictions on allowed characters.\n Generally allowed characters are: letters, numbers, and spaces representable in\n UTF-8, and the following characters: + - = . _ : / @.

    \n
  • \n
  • \n

    Tag keys and values are case-sensitive.

    \n
  • \n
  • \n

    Do not use aws:, AWS:, or any upper or lowercase\n combination of such as a prefix for either keys or values as it is reserved for\n Amazon Web Services use. You cannot edit or delete tag keys or values with this prefix. Tags with\n this prefix do not count against your tags per resource limit.

    \n
  • \n
" + "smithy.api#documentation": "

The metadata that you apply to the service to help you categorize and organize them.\n\t\t\tEach tag consists of a key and an optional value. You define both the key and\n\t\t\tvalue.

\n

The following basic restrictions apply to tags:

\n
    \n
  • \n

    Maximum number of tags per resource - 50

    \n
  • \n
  • \n

    For each resource, each tag key must be unique, and each tag key can have only\n one value.

    \n
  • \n
  • \n

    Maximum key length - 128 Unicode characters in UTF-8

    \n
  • \n
  • \n

    Maximum value length - 256 Unicode characters in UTF-8

    \n
  • \n
  • \n

    If your tagging schema is used across multiple services and resources,\n remember that other services may have restrictions on allowed characters.\n Generally allowed characters are: letters, numbers, and spaces representable in\n UTF-8, and the following characters: + - = . _ : / @.

    \n
  • \n
  • \n

    Tag keys and values are case-sensitive.

    \n
  • \n
  • \n

    Do not use aws:, AWS:, or any upper or lowercase\n combination of such as a prefix for either keys or values as it is reserved for\n Amazon Web Services use. You cannot edit or delete tag keys or values with this prefix. Tags with\n this prefix do not count against your tags per resource limit.

    \n
  • \n
" } }, "createdBy": { @@ -10381,20 +10387,20 @@ "target": "com.amazonaws.ecs#Boolean", "traits": { "smithy.api#default": false, - "smithy.api#documentation": "

Determines whether to use Amazon ECS managed tags for the tasks in the service. For more information, see\n\t\t\t\tTagging Your\n\t\t\t\tAmazon ECS Resources in the Amazon Elastic Container Service Developer Guide.

" + "smithy.api#documentation": "

Determines whether to use Amazon ECS managed tags for the tasks in the service. For more\n\t\t\tinformation, see Tagging Your Amazon ECS\n\t\t\t\tResources in the Amazon Elastic Container Service Developer Guide.

" } }, "propagateTags": { "target": "com.amazonaws.ecs#PropagateTags", "traits": { - "smithy.api#documentation": "

Determines whether to propagate the tags from the task definition or the service to the task. If no\n\t\t\tvalue is specified, the tags aren't propagated.

" + "smithy.api#documentation": "

Determines whether to propagate the tags from the task definition or the service to\n\t\t\tthe task. If no value is specified, the tags aren't propagated.

" } }, "enableExecuteCommand": { "target": "com.amazonaws.ecs#Boolean", "traits": { "smithy.api#default": false, - "smithy.api#documentation": "

Determines whether the execute command functionality is turned on for the service. If\n\t\t\t\ttrue, the execute command functionality is turned on for all containers in tasks as\n\t\t\tpart of the service.

" + "smithy.api#documentation": "

Determines whether the execute command functionality is turned on for the service. If\n\t\t\t\ttrue, the execute command functionality is turned on for all containers\n\t\t\tin tasks as part of the service.

" } }, "availabilityZoneRebalancing": { @@ -10414,19 +10420,19 @@ "port": { "target": "com.amazonaws.ecs#PortNumber", "traits": { - "smithy.api#documentation": "

The listening port number for the Service Connect proxy. This port is available inside of all of the\n\t\t\ttasks within the same namespace.

\n

To avoid changing your applications in client Amazon ECS services, set this to the same port that the\n\t\t\tclient application uses by default. For more information, see Service Connect in the Amazon Elastic Container Service Developer Guide.

", + "smithy.api#documentation": "

The listening port number for the Service Connect proxy. This port is available\n\t\t\tinside of all of the tasks within the same namespace.

\n

To avoid changing your applications in client Amazon ECS services, set this to the same\n\t\t\tport that the client application uses by default. For more information, see Service Connect in the Amazon Elastic Container Service Developer Guide.

", "smithy.api#required": {} } }, "dnsName": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

The dnsName is the name that you use in the applications of client tasks to connect to\n\t\t\tthis service. The name must be a valid DNS name but doesn't need to be fully-qualified. The name can\n\t\t\tinclude up to 127 characters. The name can include lowercase letters, numbers, underscores (_), hyphens\n\t\t\t(-), and periods (.). The name can't start with a hyphen.

\n

If this parameter isn't specified, the default value of discoveryName.namespace is used. If the discoveryName isn't specified, the port mapping name from the task definition is used in portName.namespace.

\n

To avoid changing your applications in client Amazon ECS services, set this to the same name that the\n\t\t\tclient application uses by default. For example, a few common names are database,\n\t\t\t\tdb, or the lowercase name of a database, such as mysql or\n\t\t\t\tredis. For more information, see Service Connect in the Amazon Elastic Container Service Developer Guide.

" + "smithy.api#documentation": "

The dnsName is the name that you use in the applications of client tasks\n\t\t\tto connect to this service. The name must be a valid DNS name but doesn't need to be\n\t\t\tfully-qualified. The name can include up to 127 characters. The name can include\n\t\t\tlowercase letters, numbers, underscores (_), hyphens (-), and periods (.). The name\n\t\t\tcan't start with a hyphen.

\n

If this parameter isn't specified, the default value of discoveryName.namespace is used. If the discoveryName isn't specified, the port mapping name from the task definition is used in portName.namespace.

\n

To avoid changing your applications in client Amazon ECS services, set this to the same\n\t\t\tname that the client application uses by default. For example, a few common names are\n\t\t\t\tdatabase, db, or the lowercase name of a database, such as\n\t\t\t\tmysql or redis. For more information, see Service Connect in the Amazon Elastic Container Service Developer Guide.

" } } }, "traits": { - "smithy.api#documentation": "

Each alias (\"endpoint\") is a fully-qualified name and port number that other tasks (\"clients\") can\n\t\t\tuse to connect to this service.

\n

Each name and port mapping must be unique within the namespace.

\n

Tasks that run in a namespace can use short names to connect\n\tto services in the namespace. Tasks can connect to services across all of the clusters in the namespace.\n\tTasks connect through a managed proxy container\n\tthat collects logs and metrics for increased visibility.\n\tOnly the tasks that Amazon ECS services create are supported with Service Connect.\n\tFor more information, see Service Connect in the Amazon Elastic Container Service Developer Guide.

" + "smithy.api#documentation": "

Each alias (\"endpoint\") is a fully-qualified name and port number that other tasks\n\t\t\t(\"clients\") can use to connect to this service.

\n

Each name and port mapping must be unique within the namespace.

\n

Tasks that run in a namespace can use short names to connect\n\tto services in the namespace. Tasks can connect to services across all of the clusters in the namespace.\n\tTasks connect through a managed proxy container\n\tthat collects logs and metrics for increased visibility.\n\tOnly the tasks that Amazon ECS services create are supported with Service Connect.\n\tFor more information, see Service Connect in the Amazon Elastic Container Service Developer Guide.

" } }, "com.amazonaws.ecs#ServiceConnectClientAliasList": { @@ -10449,13 +10455,13 @@ "namespace": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

The namespace name or full Amazon Resource Name (ARN) of the Cloud Map namespace for use with Service Connect. The namespace must be in the same Amazon Web Services\n\t\t\tRegion as the Amazon ECS service and cluster. The type of namespace doesn't affect Service Connect. For\n\t\t\tmore information about Cloud Map, see Working with Services in the\n\t\t\tCloud Map Developer Guide.

" + "smithy.api#documentation": "

The namespace name or full Amazon Resource Name (ARN) of the Cloud Map namespace for use with Service Connect. The namespace must be in\n\t\t\tthe same Amazon Web Services Region as the Amazon ECS service and cluster. The type of namespace doesn't\n\t\t\taffect Service Connect. For more information about Cloud Map, see Working\n\t\t\t\twith Services in the Cloud Map Developer Guide.

" } }, "services": { "target": "com.amazonaws.ecs#ServiceConnectServiceList", "traits": { - "smithy.api#documentation": "

The list of Service Connect service objects. These are names and aliases (also known as endpoints)\n\t\t\tthat are used by other Amazon ECS services to connect to this service.

\n

This field is not required for a \"client\" Amazon ECS service that's a member of a namespace only to\n\t\t\tconnect to other services within the namespace. An example of this would be a frontend application that\n\t\t\taccepts incoming requests from either a load balancer that's attached to the service or by other\n\t\t\tmeans.

\n

An object selects a port from the task definition, assigns a name for the Cloud Map service, and a\n\t\t\tlist of aliases (endpoints) and ports for client applications to refer to this service.

" + "smithy.api#documentation": "

The list of Service Connect service objects. These are names and aliases (also known\n\t\t\tas endpoints) that are used by other Amazon ECS services to connect to this service.\n\t\t\t

\n

This field is not required for a \"client\" Amazon ECS service that's a member of a namespace\n\t\t\tonly to connect to other services within the namespace. An example of this would be a\n\t\t\tfrontend application that accepts incoming requests from either a load balancer that's\n\t\t\tattached to the service or by other means.

\n

An object selects a port from the task definition, assigns a name for the Cloud Map\n\t\t\tservice, and a list of aliases (endpoints) and ports for client applications to refer to\n\t\t\tthis service.

" } }, "logConfiguration": { @@ -10463,7 +10469,7 @@ } }, "traits": { - "smithy.api#documentation": "

The Service Connect configuration of your Amazon ECS service. The configuration for this service to\n\t\t\tdiscover and connect to services, and be discovered by, and connected from, other services within a\n\t\t\tnamespace.

\n

Tasks that run in a namespace can use short names to connect\n\tto services in the namespace. Tasks can connect to services across all of the clusters in the namespace.\n\tTasks connect through a managed proxy container\n\tthat collects logs and metrics for increased visibility.\n\tOnly the tasks that Amazon ECS services create are supported with Service Connect.\n\tFor more information, see Service Connect in the Amazon Elastic Container Service Developer Guide.

" + "smithy.api#documentation": "

The Service Connect configuration of your Amazon ECS service. The configuration for this\n\t\t\tservice to discover and connect to services, and be discovered by, and connected from,\n\t\t\tother services within a namespace.

\n

Tasks that run in a namespace can use short names to connect\n\tto services in the namespace. Tasks can connect to services across all of the clusters in the namespace.\n\tTasks connect through a managed proxy container\n\tthat collects logs and metrics for increased visibility.\n\tOnly the tasks that Amazon ECS services create are supported with Service Connect.\n\tFor more information, see Service Connect in the Amazon Elastic Container Service Developer Guide.

" } }, "com.amazonaws.ecs#ServiceConnectService": { @@ -10472,7 +10478,7 @@ "portName": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

The portName must match the name of one of the portMappings from all the\n\t\t\tcontainers in the task definition of this Amazon ECS service.

", + "smithy.api#documentation": "

The portName must match the name of one of the portMappings\n\t\t\tfrom all the containers in the task definition of this Amazon ECS service.

", "smithy.api#required": {} } }, @@ -10485,25 +10491,25 @@ "clientAliases": { "target": "com.amazonaws.ecs#ServiceConnectClientAliasList", "traits": { - "smithy.api#documentation": "

The list of client aliases for this Service Connect service. You use these to assign names that can\n\t\t\tbe used by client applications. The maximum number of client aliases that you can have in this list is\n\t\t\t1.

\n

Each alias (\"endpoint\") is a fully-qualified name and port number that other Amazon ECS tasks (\"clients\")\n\t\t\tcan use to connect to this service.

\n

Each name and port mapping must be unique within the namespace.

\n

For each ServiceConnectService, you must provide at least one clientAlias\n\t\t\twith one port.

" + "smithy.api#documentation": "

The list of client aliases for this Service Connect service. You use these to assign\n\t\t\tnames that can be used by client applications. The maximum number of client aliases that\n\t\t\tyou can have in this list is 1.

\n

Each alias (\"endpoint\") is a fully-qualified name and port number that other Amazon ECS\n\t\t\ttasks (\"clients\") can use to connect to this service.

\n

Each name and port mapping must be unique within the namespace.

\n

For each ServiceConnectService, you must provide at least one\n\t\t\t\tclientAlias with one port.

" } }, "ingressPortOverride": { "target": "com.amazonaws.ecs#PortNumber", "traits": { - "smithy.api#documentation": "

The port number for the Service Connect proxy to listen on.

\n

Use the value of this field to bypass the proxy for traffic on the port number specified in the named\n\t\t\t\tportMapping in the task definition of this application, and then use it in your VPC\n\t\t\tsecurity groups to allow traffic into the proxy for this Amazon ECS service.

\n

In awsvpc mode and Fargate, the default value is the container port number. The\n\t\t\tcontainer port number is in the portMapping in the task definition. In bridge mode, the\n\t\t\tdefault value is the ephemeral port of the Service Connect proxy.

" + "smithy.api#documentation": "

The port number for the Service Connect proxy to listen on.

\n

Use the value of this field to bypass the proxy for traffic on the port number\n\t\t\tspecified in the named portMapping in the task definition of this\n\t\t\tapplication, and then use it in your VPC security groups to allow traffic into the proxy\n\t\t\tfor this Amazon ECS service.

\n

In awsvpc mode and Fargate, the default value is the container port\n\t\t\tnumber. The container port number is in the portMapping in the task\n\t\t\tdefinition. In bridge mode, the default value is the ephemeral port of the\n\t\t\tService Connect proxy.

" } }, "timeout": { "target": "com.amazonaws.ecs#TimeoutConfiguration", "traits": { - "smithy.api#documentation": "

A reference to an object that represents the configured timeouts for Service Connect.

" + "smithy.api#documentation": "

A reference to an object that represents the configured timeouts for\n\t\t\tService Connect.

" } }, "tls": { "target": "com.amazonaws.ecs#ServiceConnectTlsConfiguration", "traits": { - "smithy.api#documentation": "

A reference to an object that represents a Transport Layer Security (TLS) configuration.

" + "smithy.api#documentation": "

A reference to an object that represents a Transport Layer Security (TLS)\n\t\t\tconfiguration.

" } } }, @@ -10529,12 +10535,12 @@ "discoveryArn": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

The Amazon Resource Name (ARN) for the namespace in Cloud Map that matches the discovery name for this\n\t\t\tService Connect resource. You can use this ARN in other integrations with Cloud Map. However,\n\t\t\tService Connect can't ensure connectivity outside of Amazon ECS.

" + "smithy.api#documentation": "

The Amazon Resource Name (ARN) for the namespace in Cloud Map that matches the discovery name for this\n\t\t\tService Connect resource. You can use this ARN in other integrations with Cloud Map.\n\t\t\tHowever, Service Connect can't ensure connectivity outside of Amazon ECS.

" } } }, "traits": { - "smithy.api#documentation": "

The Service Connect resource. Each configuration maps a discovery name to a Cloud Map service name.\n\t\t\tThe data is stored in Cloud Map as part of the Service Connect configuration for each discovery name\n\t\t\tof this Amazon ECS service.

\n

A task can resolve the dnsName for each of the clientAliases of a service.\n\t\t\tHowever a task can't resolve the discovery names. If you want to connect to a service, refer to the\n\t\t\t\tServiceConnectConfiguration of that service for the list of clientAliases\n\t\t\tthat you can use.

" + "smithy.api#documentation": "

The Service Connect resource. Each configuration maps a discovery name to a\n\t\t\tCloud Map service name. The data is stored in Cloud Map as part of the\n\t\t\tService Connect configuration for each discovery name of this Amazon ECS service.

\n

A task can resolve the dnsName for each of the clientAliases\n\t\t\tof a service. However a task can't resolve the discovery names. If you want to connect\n\t\t\tto a service, refer to the ServiceConnectConfiguration of that service for\n\t\t\tthe list of clientAliases that you can use.

" } }, "com.amazonaws.ecs#ServiceConnectServiceResourceList": { @@ -10608,25 +10614,25 @@ "createdAt": { "target": "com.amazonaws.ecs#Timestamp", "traits": { - "smithy.api#documentation": "

The time the service deployment was created. The format is yyyy-MM-dd HH:mm:ss.SSSSSS.

" + "smithy.api#documentation": "

The time the service deployment was created. The format is yyyy-MM-dd\n\t\t\tHH:mm:ss.SSSSSS.

" } }, "startedAt": { "target": "com.amazonaws.ecs#Timestamp", "traits": { - "smithy.api#documentation": "

The time the service deployment started. The format is yyyy-MM-dd HH:mm:ss.SSSSSS.

" + "smithy.api#documentation": "

The time the service deployment started. The format is yyyy-MM-dd\n\t\t\tHH:mm:ss.SSSSSS.

" } }, "finishedAt": { "target": "com.amazonaws.ecs#Timestamp", "traits": { - "smithy.api#documentation": "

The time the service deployment finished. The format is yyyy-MM-dd HH:mm:ss.SSSSSS.

" + "smithy.api#documentation": "

The time the service deployment finished. The format is yyyy-MM-dd\n\t\t\tHH:mm:ss.SSSSSS.

" } }, "stoppedAt": { "target": "com.amazonaws.ecs#Timestamp", "traits": { - "smithy.api#documentation": "

The time the service deployment stopped. The format is yyyy-MM-dd HH:mm:ss.SSSSSS.

\n

The service deployment stops when any of the following actions happen:

\n
    \n
  • \n

    A user manually stops the deployment

    \n
  • \n
  • \n

    The rollback option is not in use for the failure detection mechanism (the\n\t\t\t\t\tcircuit breaker or alarm-based) and the service fails.

    \n
  • \n
" + "smithy.api#documentation": "

The time the service deployment stopped. The format is yyyy-MM-dd\n\t\t\tHH:mm:ss.SSSSSS.

\n

The service deployment stops when any of the following actions happen:

\n
    \n
  • \n

    A user manually stops the deployment

    \n
  • \n
  • \n

    The rollback option is not in use for the failure detection mechanism (the\n\t\t\t\t\tcircuit breaker or alarm-based) and the service fails.

    \n
  • \n
" } }, "updatedAt": { @@ -10656,7 +10662,7 @@ "statusReason": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

Information about why the service deployment is in the current status. For example, the circuit breaker detected a failure.

" + "smithy.api#documentation": "

Information about why the service deployment is in the current status. For example,\n\t\t\tthe circuit breaker detected a failure.

" } }, "deploymentConfiguration": { @@ -10682,7 +10688,7 @@ } }, "traits": { - "smithy.api#documentation": "

Information about the service deployment.

\n

Service deployments provide a comprehensive view of your deployments. For information\n\t\t\tabout service deployments, see View service history using Amazon ECS service deployments\n\t\t\tin the \n Amazon Elastic Container Service Developer Guide\n .

" + "smithy.api#documentation": "

Information about the service deployment.

\n

Service deployments provide a comprehensive view of your deployments. For information\n\t\t\tabout service deployments, see View service history\n\t\t\t\tusing Amazon ECS service deployments in the\n\t\t\t\n Amazon Elastic Container Service Developer Guide\n .

" } }, "com.amazonaws.ecs#ServiceDeploymentAlarms": { @@ -10691,24 +10697,24 @@ "status": { "target": "com.amazonaws.ecs#ServiceDeploymentRollbackMonitorsStatus", "traits": { - "smithy.api#documentation": "

The status of the alarms check. Amazon ECS is not using alarms for service deployment failures when the status is DISABLED.

" + "smithy.api#documentation": "

The status of the alarms check. Amazon ECS is not using alarms for service deployment\n\t\t\tfailures when the status is DISABLED.

" } }, "alarmNames": { "target": "com.amazonaws.ecs#StringList", "traits": { - "smithy.api#documentation": "

The name of the CloudWatch alarms that determine when a service deployment failed. A \",\" separates the alarms.

" + "smithy.api#documentation": "

The name of the CloudWatch alarms that determine when a service deployment failed. A\n\t\t\t\",\" separates the alarms.

" } }, "triggeredAlarmNames": { "target": "com.amazonaws.ecs#StringList", "traits": { - "smithy.api#documentation": "

One or more CloudWatch alarm names that have been triggered during the service deployment. A \",\"\n\t\t\tseparates the alarm names.

" + "smithy.api#documentation": "

One or more CloudWatch alarm names that have been triggered during the service\n\t\t\tdeployment. A \",\" separates the alarm names.

" } } }, "traits": { - "smithy.api#documentation": "

The CloudWatch alarms used to determine a service deployment failed.

\n

Amazon ECS considers the service deployment as failed when any of the alarms move to the ALARM state. For more information, see How CloudWatch alarms detect Amazon ECS deployment failures in the Amazon ECS Developer Guide.

" + "smithy.api#documentation": "

The CloudWatch alarms used to determine a service deployment failed.

\n

Amazon ECS considers the service deployment as failed when any of the alarms move to\n\t\t\tthe ALARM state. For more information, see How CloudWatch\n\t\t\t\talarms detect Amazon ECS deployment failures in the Amazon ECS Developer\n\t\t\tGuide.

" } }, "com.amazonaws.ecs#ServiceDeploymentBrief": { @@ -10765,12 +10771,12 @@ "statusReason": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

Information about why the service deployment is in the current status. For example, the circuit breaker detected a deployment failure.

" + "smithy.api#documentation": "

Information about why the service deployment is in the current status. For example,\n\t\t\tthe circuit breaker detected a deployment failure.

" } } }, "traits": { - "smithy.api#documentation": "

The service deployment properties that are retured when you call ListServiceDeployments.

\n

This provides a high-level overview of the service deployment.

" + "smithy.api#documentation": "

The service deployment properties that are returned when you call\n\t\t\t\tListServiceDeployments.

\n

This provides a high-level overview of the service deployment.

" } }, "com.amazonaws.ecs#ServiceDeploymentCircuitBreaker": { @@ -10779,7 +10785,7 @@ "status": { "target": "com.amazonaws.ecs#ServiceDeploymentRollbackMonitorsStatus", "traits": { - "smithy.api#documentation": "

The circuit breaker status. Amazon ECS is not using the circuit breaker for service deployment failures when the status is DISABLED.

" + "smithy.api#documentation": "

The circuit breaker status. Amazon ECS is not using the circuit breaker for service\n\t\t\tdeployment failures when the status is DISABLED.

" } }, "failureCount": { @@ -10793,12 +10799,12 @@ "target": "com.amazonaws.ecs#Integer", "traits": { "smithy.api#default": 0, - "smithy.api#documentation": "

The threshhold which determines that the service deployment failed.

\n

The deployment circuit breaker calculates the threshold value, and then uses the value to\n\t\t\tdetermine when to move the deployment to a FAILED state. The deployment circuit breaker\n\t\t\thas a minimum threshold of 3 and a maximum threshold of 200. and uses the values in the\n\t\t\tfollowing formula to determine the deployment failure.

\n

\n 0.5 * desired task count\n

" + "smithy.api#documentation": "

The threshold which determines that the service deployment failed.

\n

The deployment circuit breaker calculates the threshold value, and then uses the value\n\t\t\tto determine when to move the deployment to a FAILED state. The deployment circuit\n\t\t\tbreaker has a minimum threshold of 3 and a maximum threshold of 200, and uses the values\n\t\t\tin the following formula to determine the deployment failure.

\n

\n 0.5 * desired task count\n

" } } }, "traits": { - "smithy.api#documentation": "

Information about the circuit breaker used to determine when a service deployment has\n\t\t\tfailed.

\n

The deployment circuit breaker is the rolling update mechanism that determines if the\n\t\t\ttasks reach a steady state. The deployment circuit breaker has an option that will\n\t\t\tautomatically roll back a failed deployment to the last cpompleted service\n\t\t\trevision. For more information, see How the Amazon ECS\n\t\t\t\tdeployment circuit breaker detects failures in the Amazon ECS Developer\n\t\t\t\t\tGuide.

" + "smithy.api#documentation": "

Information about the circuit breaker used to determine when a service deployment has\n\t\t\tfailed.

\n

The deployment circuit breaker is the rolling update mechanism that determines if the\n\t\t\ttasks reach a steady state. The deployment circuit breaker has an option that will\n\t\t\tautomatically roll back a failed deployment to the last completed service revision. For\n\t\t\tmore information, see How the Amazon\n\t\t\t\tECS deployment circuit breaker detects failures in the Amazon ECS\n\t\t\t\tDeveloper Guide.

" } }, "com.amazonaws.ecs#ServiceDeploymentRollbackMonitorsStatus": { @@ -10956,67 +10962,67 @@ "encrypted": { "target": "com.amazonaws.ecs#BoxedBoolean", "traits": { - "smithy.api#documentation": "

Indicates whether the volume should be encrypted. If no value is specified, encryption is turned on\n\t\t\tby default. This parameter maps 1:1 with the Encrypted parameter of the CreateVolume\n\t\t\t\tAPI in the Amazon EC2 API Reference.

" + "smithy.api#documentation": "

Indicates whether the volume should be encrypted. If no value is specified, encryption\n\t\t\tis turned on by default. This parameter maps 1:1 with the Encrypted\n\t\t\tparameter of the CreateVolume API in\n\t\t\tthe Amazon EC2 API Reference.

" } }, "kmsKeyId": { "target": "com.amazonaws.ecs#EBSKMSKeyId", "traits": { - "smithy.api#documentation": "

The Amazon Resource Name (ARN) identifier of the Amazon Web Services Key Management Service key to use for Amazon EBS encryption. When encryption is turned\n\t\t\ton and no Amazon Web Services Key Management Service key is specified, the default Amazon Web Services managed key for Amazon EBS volumes is used. This\n\t\t\tparameter maps 1:1 with the KmsKeyId parameter of the CreateVolume API in the\n\t\t\t\tAmazon EC2 API Reference.

\n \n

Amazon Web Services authenticates the Amazon Web Services Key Management Service key asynchronously. Therefore, if you specify an ID, alias, or\n\t\t\t\tARN that is invalid, the action can appear to complete, but eventually fails.

\n
" + "smithy.api#documentation": "

The Amazon Resource Name (ARN) identifier of the Amazon Web Services Key Management Service key to use for Amazon EBS encryption. When\n\t\t\tencryption is turned on and no Amazon Web Services Key Management Service key is specified, the default Amazon Web Services managed key\n\t\t\tfor Amazon EBS volumes is used. This parameter maps 1:1 with the KmsKeyId\n\t\t\tparameter of the CreateVolume API in\n\t\t\tthe Amazon EC2 API Reference.

\n \n

Amazon Web Services authenticates the Amazon Web Services Key Management Service key asynchronously. Therefore, if you specify an\n\t\t\t\tID, alias, or ARN that is invalid, the action can appear to complete, but\n\t\t\t\teventually fails.

\n
" } }, "volumeType": { "target": "com.amazonaws.ecs#EBSVolumeType", "traits": { - "smithy.api#documentation": "

The volume type. This parameter maps 1:1 with the VolumeType parameter of the CreateVolume\n\t\t\t\tAPI in the Amazon EC2 API Reference. For more information, see Amazon EBS volume types\n\t\t\tin the Amazon EC2 User Guide.

\n

The following are the supported volume types.

\n
    \n
  • \n

    General Purpose SSD: gp2|gp3\n

    \n
  • \n
  • \n

    Provisioned IOPS SSD: io1|io2\n

    \n
  • \n
  • \n

    Throughput Optimized HDD: st1\n

    \n
  • \n
  • \n

    Cold HDD: sc1\n

    \n
  • \n
  • \n

    Magnetic: standard\n

    \n \n

    The magnetic volume type is not supported on Fargate.

    \n
    \n
  • \n
" + "smithy.api#documentation": "

The volume type. This parameter maps 1:1 with the VolumeType parameter of\n\t\t\tthe CreateVolume API in the Amazon EC2 API Reference. For more\n\t\t\tinformation, see Amazon EBS volume types in\n\t\t\tthe Amazon EC2 User Guide.

\n

The following are the supported volume types.

\n
    \n
  • \n

    General Purpose SSD: gp2|gp3\n

    \n
  • \n
  • \n

    Provisioned IOPS SSD: io1|io2\n

    \n
  • \n
  • \n

    Throughput Optimized HDD: st1\n

    \n
  • \n
  • \n

    Cold HDD: sc1\n

    \n
  • \n
  • \n

    Magnetic: standard\n

    \n \n

    The magnetic volume type is not supported on Fargate.

    \n
    \n
  • \n
" } }, "sizeInGiB": { "target": "com.amazonaws.ecs#BoxedInteger", "traits": { - "smithy.api#documentation": "

The size of the volume in GiB. You must specify either a volume size or a snapshot ID. If you specify\n\t\t\ta snapshot ID, the snapshot size is used for the volume size by default. You can optionally specify a\n\t\t\tvolume size greater than or equal to the snapshot size. This parameter maps 1:1 with the\n\t\t\t\tSize parameter of the CreateVolume API in the\n\t\t\t\tAmazon EC2 API Reference.

\n

The following are the supported volume size values for each volume type.

\n
    \n
  • \n

    \n gp2 and gp3: 1-16,384

    \n
  • \n
  • \n

    \n io1 and io2: 4-16,384

    \n
  • \n
  • \n

    \n st1 and sc1: 125-16,384

    \n
  • \n
  • \n

    \n standard: 1-1,024

    \n
  • \n
" + "smithy.api#documentation": "

The size of the volume in GiB. You must specify either a volume size or a snapshot ID.\n\t\t\tIf you specify a snapshot ID, the snapshot size is used for the volume size by default.\n\t\t\tYou can optionally specify a volume size greater than or equal to the snapshot size.\n\t\t\tThis parameter maps 1:1 with the Size parameter of the CreateVolume API in the Amazon EC2 API Reference.

\n

The following are the supported volume size values for each volume type.

\n
    \n
  • \n

    \n gp2 and gp3: 1-16,384

    \n
  • \n
  • \n

    \n io1 and io2: 4-16,384

    \n
  • \n
  • \n

    \n st1 and sc1: 125-16,384

    \n
  • \n
  • \n

    \n standard: 1-1,024

    \n
  • \n
" } }, "snapshotId": { "target": "com.amazonaws.ecs#EBSSnapshotId", "traits": { - "smithy.api#documentation": "

The snapshot that Amazon ECS uses to create the volume. You must specify either a snapshot ID or a volume\n\t\t\tsize. This parameter maps 1:1 with the SnapshotId parameter of the CreateVolume\n\t\t\t\tAPI in the Amazon EC2 API Reference.

" + "smithy.api#documentation": "

The snapshot that Amazon ECS uses to create the volume. You must specify either a snapshot\n\t\t\tID or a volume size. This parameter maps 1:1 with the SnapshotId parameter\n\t\t\tof the CreateVolume API in\n\t\t\tthe Amazon EC2 API Reference.

" } }, "iops": { "target": "com.amazonaws.ecs#BoxedInteger", "traits": { - "smithy.api#documentation": "

The number of I/O operations per second (IOPS). For gp3, io1, and\n\t\t\t\tio2 volumes, this represents the number of IOPS that are provisioned for the volume.\n\t\t\tFor gp2 volumes, this represents the baseline performance of the volume and the rate at\n\t\t\twhich the volume accumulates I/O credits for bursting.

\n

The following are the supported values for each volume type.

\n
    \n
  • \n

    \n gp3: 3,000 - 16,000 IOPS

    \n
  • \n
  • \n

    \n io1: 100 - 64,000 IOPS

    \n
  • \n
  • \n

    \n io2: 100 - 256,000 IOPS

    \n
  • \n
\n

This parameter is required for io1 and io2 volume types. The default for\n\t\t\t\tgp3 volumes is 3,000 IOPS. This parameter is not supported for\n\t\t\t\tst1, sc1, or standard volume types.

\n

This parameter maps 1:1 with the Iops parameter of the CreateVolume API in the\n\t\t\t\tAmazon EC2 API Reference.

" + "smithy.api#documentation": "

The number of I/O operations per second (IOPS). For gp3,\n\t\t\tio1, and io2 volumes, this represents the number of IOPS that\n\t\t\tare provisioned for the volume. For gp2 volumes, this represents the\n\t\t\tbaseline performance of the volume and the rate at which the volume accumulates I/O\n\t\t\tcredits for bursting.

\n

The following are the supported values for each volume type.

\n
    \n
  • \n

    \n gp3: 3,000 - 16,000 IOPS

    \n
  • \n
  • \n

    \n io1: 100 - 64,000 IOPS

    \n
  • \n
  • \n

    \n io2: 100 - 256,000 IOPS

    \n
  • \n
\n

This parameter is required for io1 and io2 volume types. The\n\t\t\tdefault for gp3 volumes is 3,000 IOPS. This parameter is not\n\t\t\tsupported for st1, sc1, or standard volume\n\t\t\ttypes.

\n

This parameter maps 1:1 with the Iops parameter of the CreateVolume API in the Amazon EC2 API Reference.

" } }, "throughput": { "target": "com.amazonaws.ecs#BoxedInteger", "traits": { - "smithy.api#documentation": "

The throughput to provision for a volume, in MiB/s, with a maximum of 1,000 MiB/s. This parameter\n\t\t\tmaps 1:1 with the Throughput parameter of the CreateVolume API in the\n\t\t\t\tAmazon EC2 API Reference.

\n \n

This parameter is only supported for the gp3 volume type.

\n
" + "smithy.api#documentation": "

The throughput to provision for a volume, in MiB/s, with a maximum of 1,000 MiB/s.\n\t\t\tThis parameter maps 1:1 with the Throughput parameter of the CreateVolume API in the Amazon EC2 API Reference.

\n \n

This parameter is only supported for the gp3 volume type.

\n
" } }, "tagSpecifications": { "target": "com.amazonaws.ecs#EBSTagSpecifications", "traits": { - "smithy.api#documentation": "

The tags to apply to the volume. Amazon ECS applies service-managed tags by default. This parameter maps\n\t\t\t1:1 with the TagSpecifications.N parameter of the CreateVolume API in the\n\t\t\t\tAmazon EC2 API Reference.

" + "smithy.api#documentation": "

The tags to apply to the volume. Amazon ECS applies service-managed tags by default. This\n\t\t\tparameter maps 1:1 with the TagSpecifications.N parameter of the CreateVolume API in the Amazon EC2 API Reference.

" } }, "roleArn": { "target": "com.amazonaws.ecs#IAMRoleArn", "traits": { - "smithy.api#documentation": "

The ARN of the IAM role to associate with this volume. This is the Amazon ECS infrastructure IAM role\n\t\t\tthat is used to manage your Amazon Web Services infrastructure. We recommend using the Amazon ECS-managed\n\t\t\t\tAmazonECSInfrastructureRolePolicyForVolumes IAM policy with this role. For more\n\t\t\tinformation, see Amazon ECS infrastructure IAM\n\t\t\t\trole in the Amazon ECS Developer Guide.

", + "smithy.api#documentation": "

The ARN of the IAM role to associate with this volume. This is the Amazon ECS\n\t\t\tinfrastructure IAM role that is used to manage your Amazon Web Services infrastructure. We recommend\n\t\t\tusing the Amazon ECS-managed AmazonECSInfrastructureRolePolicyForVolumes IAM\n\t\t\tpolicy with this role. For more information, see Amazon ECS\n\t\t\t\tinfrastructure IAM role in the Amazon ECS Developer\n\t\t\tGuide.

", "smithy.api#required": {} } }, "filesystemType": { "target": "com.amazonaws.ecs#TaskFilesystemType", "traits": { - "smithy.api#documentation": "

The filesystem type for the volume. For volumes created from a snapshot, you must specify\n\t\t\tthe same filesystem type that the volume was using when the snapshot was created. If\n\t\t\tthere is a filesystem type mismatch, the task will fail to start.

\n

The available Linux filesystem types are\u2028 ext3, ext4, and\n\t\t\t\txfs. If no value is specified, the xfs filesystem type is\n\t\t\tused by default.

\n

The available Windows filesystem types are NTFS.

" + "smithy.api#documentation": "

The filesystem type for the volume. For volumes created from a snapshot, you must\n\t\t\tspecify the same filesystem type that the volume was using when the snapshot was\n\t\t\tcreated. If there is a filesystem type mismatch, the task will fail to start.

\n

The available Linux filesystem types are\u2028 ext3, ext4, and\n\t\t\t\txfs. If no value is specified, the xfs filesystem type is\n\t\t\tused by default.

\n

The available Windows filesystem types are NTFS.

" } } }, "traits": { - "smithy.api#documentation": "

The configuration for the Amazon EBS volume that Amazon ECS creates and manages on your behalf. These\n\t\t\tsettings are used to create each Amazon EBS volume, with one volume created for each task in\n\t\t\tthe service. For information about the supported launch types and operating systems, see Supported operating systems and launch types\n\t\t\tin the Amazon Elastic Container Service Developer Guide.

\n

Many of these parameters map 1:1 with the Amazon EBS CreateVolume API request\n\t\t\tparameters.

" + "smithy.api#documentation": "

The configuration for the Amazon EBS volume that Amazon ECS creates and manages on your behalf.\n\t\t\tThese settings are used to create each Amazon EBS volume, with one volume created for each\n\t\t\ttask in the service. For information about the supported launch types and operating\n\t\t\tsystems, see Supported operating systems and launch types in the Amazon Elastic Container Service\n\t\t\t\tDeveloper Guide.

\n

Many of these parameters map 1:1 with the Amazon EBS CreateVolume API request\n\t\t\tparameters.

" } }, "com.amazonaws.ecs#ServiceNotActiveException": { @@ -11030,7 +11036,7 @@ } }, "traits": { - "smithy.api#documentation": "

The specified service isn't active. You can't update a service that's inactive. If you have\n\t\t\tpreviously deleted a service, you can re-create it with CreateService.

", + "smithy.api#documentation": "

The specified service isn't active. You can't update a service that's inactive. If you\n\t\t\thave previously deleted a service, you can re-create it with CreateService.

", "smithy.api#error": "client" } }, @@ -11045,7 +11051,7 @@ } }, "traits": { - "smithy.api#documentation": "

The specified service wasn't found. You can view your available services with ListServices. Amazon ECS services are cluster specific and Region specific.

", + "smithy.api#documentation": "

The specified service wasn't found. You can view your available services with ListServices. Amazon ECS services are cluster specific and Region\n\t\t\tspecific.

", "smithy.api#error": "client" } }, @@ -11061,30 +11067,30 @@ "registryArn": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the service registry. The currently supported service registry is Cloud Map. For more\n\t\t\tinformation, see CreateService.

" + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the service registry. The currently supported service registry is\n\t\t\tCloud Map. For more information, see CreateService.

" } }, "port": { "target": "com.amazonaws.ecs#BoxedInteger", "traits": { - "smithy.api#documentation": "

The port value used if your service discovery service specified an SRV record. This field might be\n\t\t\tused if both the awsvpc network mode and SRV records are used.

" + "smithy.api#documentation": "

The port value used if your service discovery service specified an SRV record. This\n\t\t\tfield might be used if both the awsvpc network mode and SRV records are\n\t\t\tused.

" } }, "containerName": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

The container name value to be used for your service discovery service. It's already specified in the\n\t\t\ttask definition. If the task definition that your service task specifies uses the bridge\n\t\t\tor host network mode, you must specify a containerName and\n\t\t\t\tcontainerPort combination from the task definition. If the task definition that your\n\t\t\tservice task specifies uses the awsvpc network mode and a type SRV DNS record is used, you\n\t\t\tmust specify either a containerName and containerPort combination or a\n\t\t\t\tport value. However, you can't specify both.

" + "smithy.api#documentation": "

The container name value to be used for your service discovery service. It's already\n\t\t\tspecified in the task definition. If the task definition that your service task\n\t\t\tspecifies uses the bridge or host network mode, you must\n\t\t\tspecify a containerName and containerPort combination from the\n\t\t\ttask definition. If the task definition that your service task specifies uses the\n\t\t\t\tawsvpc network mode and a type SRV DNS record is used, you must specify\n\t\t\teither a containerName and containerPort combination or a\n\t\t\t\tport value. However, you can't specify both.

" } }, "containerPort": { "target": "com.amazonaws.ecs#BoxedInteger", "traits": { - "smithy.api#documentation": "

The port value to be used for your service discovery service. It's already specified in the task\n\t\t\tdefinition. If the task definition your service task specifies uses the bridge or\n\t\t\t\thost network mode, you must specify a containerName and\n\t\t\t\tcontainerPort combination from the task definition. If the task definition your\n\t\t\tservice task specifies uses the awsvpc network mode and a type SRV DNS record is used, you\n\t\t\tmust specify either a containerName and containerPort combination or a\n\t\t\t\tport value. However, you can't specify both.

" + "smithy.api#documentation": "

The port value to be used for your service discovery service. It's already specified\n\t\t\tin the task definition. If the task definition your service task specifies uses the\n\t\t\t\tbridge or host network mode, you must specify a\n\t\t\t\tcontainerName and containerPort combination from the task\n\t\t\tdefinition. If the task definition your service task specifies uses the\n\t\t\t\tawsvpc network mode and a type SRV DNS record is used, you must specify\n\t\t\teither a containerName and containerPort combination or a\n\t\t\t\tport value. However, you can't specify both.

" } } }, "traits": { - "smithy.api#documentation": "

The details for the service registry.

\n

Each service may be associated with one service registry. Multiple service registries for each\n\t\t\tservice are not supported.

\n

When you add, update, or remove the service registries configuration, Amazon ECS starts a new deployment.\n\t\t\tNew tasks are registered and deregistered to the updated service registry configuration.

" + "smithy.api#documentation": "

The details for the service registry.

\n

Each service may be associated with one service registry. Multiple service registries\n\t\t\tfor each service are not supported.

\n

When you add, update, or remove the service registries configuration, Amazon ECS starts a\n\t\t\tnew deployment. New tasks are registered and deregistered to the updated service\n\t\t\tregistry configuration.

" } }, "com.amazonaws.ecs#ServiceRevision": { @@ -11181,7 +11187,7 @@ "createdAt": { "target": "com.amazonaws.ecs#Timestamp", "traits": { - "smithy.api#documentation": "

The time that the service revision was created. The format is yyyy-mm-dd HH:mm:ss.SSSSS.

" + "smithy.api#documentation": "

The time that the service revision was created. The format is yyyy-mm-dd\n\t\t\tHH:mm:ss.SSSSS.

" } }, "vpcLatticeConfigurations": { @@ -11192,7 +11198,7 @@ } }, "traits": { - "smithy.api#documentation": "

Information about the service revision.

\n

A service revision contains a record of the workload configuration Amazon ECS is attempting to deploy. Whenever you create or deploy a service, Amazon ECS automatically creates and captures the configuration that you're trying to deploy in the service revision. For information\n\t\t\tabout service revisions, see Amazon ECS service revisions\n\t\t\tin the \n Amazon Elastic Container Service Developer Guide\n .

" + "smithy.api#documentation": "

Information about the service revision.

\n

A service revision contains a record of the workload configuration Amazon ECS is attempting\n\t\t\tto deploy. Whenever you create or deploy a service, Amazon ECS automatically creates and\n\t\t\tcaptures the configuration that you're trying to deploy in the service revision. For\n\t\t\tinformation about service revisions, see Amazon ECS service\n\t\t\t\trevisions in the \n Amazon Elastic Container Service Developer Guide\n .

" } }, "com.amazonaws.ecs#ServiceRevisionSummary": { @@ -11227,7 +11233,7 @@ } }, "traits": { - "smithy.api#documentation": "

The information about the number of requested, pending, and running tasks for a service revision.

" + "smithy.api#documentation": "

The information about the number of requested, pending, and running tasks for a\n\t\t\tservice revision.

" } }, "com.amazonaws.ecs#ServiceRevisions": { @@ -11248,19 +11254,19 @@ "name": { "target": "com.amazonaws.ecs#ECSVolumeName", "traits": { - "smithy.api#documentation": "

The name of the volume. This value must match the volume name from the Volume object in\n\t\t\tthe task definition.

", + "smithy.api#documentation": "

The name of the volume. This value must match the volume name from the\n\t\t\t\tVolume object in the task definition.

", "smithy.api#required": {} } }, "managedEBSVolume": { "target": "com.amazonaws.ecs#ServiceManagedEBSVolumeConfiguration", "traits": { - "smithy.api#documentation": "

The configuration for the Amazon EBS volume that Amazon ECS creates and manages on your behalf. These settings\n\t\t\tare used to create each Amazon EBS volume, with one volume created for each task in the service. The Amazon EBS\n\t\t\tvolumes are visible in your account in the Amazon EC2 console once they are created.

" + "smithy.api#documentation": "

The configuration for the Amazon EBS volume that Amazon ECS creates and manages on your behalf.\n\t\t\tThese settings are used to create each Amazon EBS volume, with one volume created for each\n\t\t\ttask in the service. The Amazon EBS volumes are visible in your account in the Amazon EC2 console\n\t\t\tonce they are created.

" } } }, "traits": { - "smithy.api#documentation": "

The configuration for a volume specified in the task definition as a volume that is configured at\n\t\t\tlaunch time. Currently, the only supported volume type is an Amazon EBS volume.

" + "smithy.api#documentation": "

The configuration for a volume specified in the task definition as a volume that is\n\t\t\tconfigured at launch time. Currently, the only supported volume type is an Amazon EBS\n\t\t\tvolume.

" } }, "com.amazonaws.ecs#ServiceVolumeConfigurations": { @@ -11287,13 +11293,13 @@ "streamUrl": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

A URL to the managed agent on the container that the SSM Session Manager client uses to send commands\n\t\t\tand receive output from the container.

" + "smithy.api#documentation": "

A URL to the managed agent on the container that the SSM Session Manager client uses\n\t\t\tto send commands and receive output from the container.

" } }, "tokenValue": { "target": "com.amazonaws.ecs#SensitiveString", "traits": { - "smithy.api#documentation": "

An encrypted token value containing session and caller information. It's used to authenticate the\n\t\t\tconnection to the container.

" + "smithy.api#documentation": "

An encrypted token value containing session and caller information. It's used to\n\t\t\tauthenticate the connection to the container.

" } } }, @@ -11319,13 +11325,13 @@ "principalArn": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

The ARN of the principal. It can be a user, role, or the root user. If this field is omitted, the\n\t\t\tauthenticated user is assumed.

" + "smithy.api#documentation": "

The ARN of the principal. It can be a user, role, or the root user. If this field is\n\t\t\tomitted, the authenticated user is assumed.

" } }, "type": { "target": "com.amazonaws.ecs#SettingType", "traits": { - "smithy.api#documentation": "

Indicates whether Amazon Web Services manages the account setting, or if the user manages it.

\n

\n aws_managed account settings are read-only, as Amazon Web Services manages such on the customer's\n\t\t\tbehalf. Currently, the guardDutyActivate account setting is the only one Amazon Web Services\n\t\t\tmanages.

" + "smithy.api#documentation": "

Indicates whether Amazon Web Services manages the account setting, or if the user manages it.

\n

\n aws_managed account settings are read-only, as Amazon Web Services manages such on the\n\t\t\tcustomer's behalf. Currently, the guardDutyActivate account setting is the\n\t\t\tonly one Amazon Web Services manages.

" } } }, @@ -11475,7 +11481,7 @@ } ], "traits": { - "smithy.api#documentation": "

Starts a new task from the specified task definition on the specified container instance or\n\t\t\tinstances.

\n \n

On March 21, 2024, a change was made to resolve the task definition revision before authorization. When a task definition revision is not specified, authorization will occur using the latest revision of a task definition.

\n
\n \n

Amazon Elastic Inference (EI) is no longer available to customers.

\n
\n

Alternatively, you can useRunTask to place tasks for you. For more information, see\n\t\t\t\tScheduling\n\t\t\t\tTasks in the Amazon Elastic Container Service Developer Guide.

\n

You can attach Amazon EBS volumes to Amazon ECS tasks by configuring the volume when creating or updating a\n\t\t\tservice. For more infomation, see Amazon EBS\n\t\t\t\tvolumes in the Amazon Elastic Container Service Developer Guide.

" + "smithy.api#documentation": "

Starts a new task from the specified task definition on the specified container\n\t\t\tinstance or instances.

\n \n

On March 21, 2024, a change was made to resolve the task definition revision before authorization. When a task definition revision is not specified, authorization will occur using the latest revision of a task definition.

\n
\n \n

Amazon Elastic Inference (EI) is no longer available to customers.

\n
\n

Alternatively, you can use RunTask to place tasks for you. For more\n\t\t\tinformation, see Scheduling Tasks in the Amazon Elastic Container Service Developer Guide.

\n

You can attach Amazon EBS volumes to Amazon ECS tasks by configuring the volume when creating or\n\t\t\tupdating a service. For more information, see Amazon EBS volumes in the Amazon Elastic Container Service Developer Guide.

" } }, "com.amazonaws.ecs#StartTaskRequest": { @@ -11490,7 +11496,7 @@ "containerInstances": { "target": "com.amazonaws.ecs#StringList", "traits": { - "smithy.api#documentation": "

The container instance IDs or full ARN entries for the container instances where you would like to\n\t\t\tplace your task. You can specify up to 10 container instances.

", + "smithy.api#documentation": "

The container instance IDs or full ARN entries for the container instances where you\n\t\t\twould like to place your task. You can specify up to 10 container instances.

", "smithy.api#required": {} } }, @@ -11498,20 +11504,20 @@ "target": "com.amazonaws.ecs#Boolean", "traits": { "smithy.api#default": false, - "smithy.api#documentation": "

Specifies whether to use Amazon ECS managed tags for the task. For more information, see Tagging Your Amazon ECS\n\t\t\t\tResources in the Amazon Elastic Container Service Developer Guide.

" + "smithy.api#documentation": "

Specifies whether to use Amazon ECS managed tags for the task. For more information, see\n\t\t\t\tTagging Your Amazon ECS\n\t\t\t\tResources in the Amazon Elastic Container Service Developer Guide.

" } }, "enableExecuteCommand": { "target": "com.amazonaws.ecs#Boolean", "traits": { "smithy.api#default": false, - "smithy.api#documentation": "

Whether or not the execute command functionality is turned on for the task. If true,\n\t\t\tthis turns on the execute command functionality on all containers in the task.

" + "smithy.api#documentation": "

Whether or not the execute command functionality is turned on for the task. If\n\t\t\t\ttrue, this turns on the execute command functionality on all containers\n\t\t\tin the task.

" } }, "group": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

The name of the task group to associate with the task. The default value is the family name of the\n\t\t\ttask definition (for example, family:my-family-name).

" + "smithy.api#documentation": "

The name of the task group to associate with the task. The default value is the family\n\t\t\tname of the task definition (for example, family:my-family-name).

" } }, "networkConfiguration": { @@ -11523,13 +11529,13 @@ "overrides": { "target": "com.amazonaws.ecs#TaskOverride", "traits": { - "smithy.api#documentation": "

A list of container overrides in JSON format that specify the name of a container in the specified\n\t\t\ttask definition and the overrides it receives. You can override the default command for a container\n\t\t\t(that's specified in the task definition or Docker image) with a command override. You can\n\t\t\talso override existing environment variables (that are specified in the task definition or Docker\n\t\t\timage) on a container or add new environment variables to it with an environment\n\t\t\toverride.

\n \n

A total of 8192 characters are allowed for overrides. This limit includes the JSON formatting\n\t\t\t\tcharacters of the override structure.

\n
" + "smithy.api#documentation": "

A list of container overrides in JSON format that specify the name of a container in\n\t\t\tthe specified task definition and the overrides it receives. You can override the\n\t\t\tdefault command for a container (that's specified in the task definition or Docker\n\t\t\timage) with a command override. You can also override existing environment\n\t\t\tvariables (that are specified in the task definition or Docker image) on a container or\n\t\t\tadd new environment variables to it with an environment override.

\n \n

A total of 8192 characters are allowed for overrides. This limit includes the JSON\n\t\t\t\tformatting characters of the override structure.

\n
" } }, "propagateTags": { "target": "com.amazonaws.ecs#PropagateTags", "traits": { - "smithy.api#documentation": "

Specifies whether to propagate the tags from the task definition or the service to the task. If no\n\t\t\tvalue is specified, the tags aren't propagated.

" + "smithy.api#documentation": "

Specifies whether to propagate the tags from the task definition or the service to the\n\t\t\ttask. If no value is specified, the tags aren't propagated.

" } }, "referenceId": { @@ -11541,26 +11547,26 @@ "startedBy": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

An optional tag specified when a task is started. For example, if you automatically trigger\n\t\t\ta task to run a batch process job, you could apply a unique identifier for that job to\n\t\t\tyour task with the startedBy parameter. You can then identify which tasks\n\t\t\tbelong to that job by filtering the results of a ListTasks call with\n\t\t\tthe startedBy value. Up to 36 letters (uppercase and lowercase), numbers,\n\t\t\thyphens (-), forward slash (/), and underscores (_) are allowed.

\n

If a task is started by an Amazon ECS service, the startedBy parameter\n\t\t\tcontains the deployment ID of the service that starts it.

" + "smithy.api#documentation": "

An optional tag specified when a task is started. For example, if you automatically\n\t\t\ttrigger a task to run a batch process job, you could apply a unique identifier for that\n\t\t\tjob to your task with the startedBy parameter. You can then identify which\n\t\t\ttasks belong to that job by filtering the results of a ListTasks call with\n\t\t\tthe startedBy value. Up to 36 letters (uppercase and lowercase), numbers,\n\t\t\thyphens (-), forward slash (/), and underscores (_) are allowed.

\n

If a task is started by an Amazon ECS service, the startedBy parameter\n\t\t\tcontains the deployment ID of the service that starts it.

" } }, "tags": { "target": "com.amazonaws.ecs#Tags", "traits": { - "smithy.api#documentation": "

The metadata that you apply to the task to help you categorize and organize them. Each tag consists\n\t\t\tof a key and an optional value, both of which you define.

\n

The following basic restrictions apply to tags:

\n
    \n
  • \n

    Maximum number of tags per resource - 50

    \n
  • \n
  • \n

    For each resource, each tag key must be unique, and each tag key can have only\n one value.

    \n
  • \n
  • \n

    Maximum key length - 128 Unicode characters in UTF-8

    \n
  • \n
  • \n

    Maximum value length - 256 Unicode characters in UTF-8

    \n
  • \n
  • \n

    If your tagging schema is used across multiple services and resources,\n remember that other services may have restrictions on allowed characters.\n Generally allowed characters are: letters, numbers, and spaces representable in\n UTF-8, and the following characters: + - = . _ : / @.

    \n
  • \n
  • \n

    Tag keys and values are case-sensitive.

    \n
  • \n
  • \n

    Do not use aws:, AWS:, or any upper or lowercase\n combination of such as a prefix for either keys or values as it is reserved for\n Amazon Web Services use. You cannot edit or delete tag keys or values with this prefix. Tags with\n this prefix do not count against your tags per resource limit.

    \n
  • \n
" + "smithy.api#documentation": "

The metadata that you apply to the task to help you categorize and organize them. Each\n\t\t\ttag consists of a key and an optional value, both of which you define.

\n

The following basic restrictions apply to tags:

\n
    \n
  • \n

    Maximum number of tags per resource - 50

    \n
  • \n
  • \n

    For each resource, each tag key must be unique, and each tag key can have only\n one value.

    \n
  • \n
  • \n

    Maximum key length - 128 Unicode characters in UTF-8

    \n
  • \n
  • \n

    Maximum value length - 256 Unicode characters in UTF-8

    \n
  • \n
  • \n

    If your tagging schema is used across multiple services and resources,\n remember that other services may have restrictions on allowed characters.\n Generally allowed characters are: letters, numbers, and spaces representable in\n UTF-8, and the following characters: + - = . _ : / @.

    \n
  • \n
  • \n

    Tag keys and values are case-sensitive.

    \n
  • \n
  • \n

    Do not use aws:, AWS:, or any upper or lowercase\n combination of such as a prefix for either keys or values as it is reserved for\n Amazon Web Services use. You cannot edit or delete tag keys or values with this prefix. Tags with\n this prefix do not count against your tags per resource limit.

    \n
  • \n
" } }, "taskDefinition": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

The family and revision (family:revision) or full ARN of the\n\t\t\ttask definition to start. If a revision isn't specified, the latest ACTIVE\n\t\t\trevision is used.

", + "smithy.api#documentation": "

The family and revision (family:revision) or\n\t\t\tfull ARN of the task definition to start. If a revision isn't specified,\n\t\t\tthe latest ACTIVE revision is used.

", "smithy.api#required": {} } }, "volumeConfigurations": { "target": "com.amazonaws.ecs#TaskVolumeConfigurations", "traits": { - "smithy.api#documentation": "

The details of the volume that was configuredAtLaunch. You can configure the size,\n\t\t\tvolumeType, IOPS, throughput, snapshot and encryption in TaskManagedEBSVolumeConfiguration. The name of the volume must match the\n\t\t\t\tname from the task definition.

" + "smithy.api#documentation": "

The details of the volume that was configuredAtLaunch. You can configure\n\t\t\tthe size, volumeType, IOPS, throughput, snapshot and encryption in TaskManagedEBSVolumeConfiguration. The name of the volume must\n\t\t\tmatch the name from the task definition.

" } } }, @@ -11574,7 +11580,7 @@ "tasks": { "target": "com.amazonaws.ecs#Tasks", "traits": { - "smithy.api#documentation": "

A full description of the tasks that were started. Each task that was successfully placed on your\n\t\t\tcontainer instances is described.

" + "smithy.api#documentation": "

A full description of the tasks that were started. Each task that was successfully\n\t\t\tplaced on your container instances is described.

" } }, "failures": { @@ -11617,7 +11623,7 @@ } ], "traits": { - "smithy.api#documentation": "

Stops a running task. Any tags associated with the task will be deleted.

\n

When you call StopTask on a task, the equivalent of docker stop is issued\n\t\t\tto the containers running in the task. This results in a SIGTERM value and a default\n\t\t\t30-second timeout, after which the SIGKILL value is sent and the containers are forcibly\n\t\t\tstopped. If the container handles the SIGTERM value gracefully and exits within 30 seconds\n\t\t\tfrom receiving it, no SIGKILL value is sent.

\n

For Windows containers, POSIX signals do not work and runtime stops the container by sending a\n\t\t\t\tCTRL_SHUTDOWN_EVENT. For more information, see Unable to react to graceful shutdown of (Windows)\n\t\t\t\tcontainer #25982 on GitHub.

\n \n

The default 30-second timeout can be configured on the Amazon ECS container agent with the\n\t\t\t\t\tECS_CONTAINER_STOP_TIMEOUT variable. For more information, see Amazon ECS\n\t\t\t\t\tContainer Agent Configuration in the Amazon Elastic Container Service Developer Guide.

\n
" + "smithy.api#documentation": "

Stops a running task. Any tags associated with the task will be deleted.

\n

When you call StopTask on a task, the equivalent of docker\n\t\t\t\tstop is issued to the containers running in the task. This results in a\n\t\t\t\tSIGTERM value and a default 30-second timeout, after which the\n\t\t\t\tSIGKILL value is sent and the containers are forcibly stopped. If the\n\t\t\tcontainer handles the SIGTERM value gracefully and exits within 30 seconds\n\t\t\tfrom receiving it, no SIGKILL value is sent.

\n

For Windows containers, POSIX signals do not work and runtime stops the container by\n\t\t\tsending a CTRL_SHUTDOWN_EVENT. For more information, see Unable to react to graceful shutdown\n\t\t\t\tof (Windows) container #25982 on GitHub.

\n \n

The default 30-second timeout can be configured on the Amazon ECS container agent with\n\t\t\t\tthe ECS_CONTAINER_STOP_TIMEOUT variable. For more information, see\n\t\t\t\t\tAmazon ECS Container Agent Configuration in the\n\t\t\t\tAmazon Elastic Container Service Developer Guide.

\n
" } }, "com.amazonaws.ecs#StopTaskRequest": { @@ -11639,7 +11645,7 @@ "reason": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

An optional message specified when a task is stopped. For example, if you're using a custom\n\t\t\tscheduler, you can use this parameter to specify the reason for stopping the task here, and the message\n\t\t\tappears in subsequent DescribeTasks API operations on\n\t\t\tthis task.

" + "smithy.api#documentation": "

An optional message specified when a task is stopped. For example, if you're using a\n\t\t\tcustom scheduler, you can use this parameter to specify the reason for stopping the task\n\t\t\there, and the message appears in subsequent DescribeTasks\n\t\t\tAPI operations on this task.

" } } }, @@ -11711,7 +11717,7 @@ "cluster": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

The short name or full ARN of the cluster that hosts the container instance the attachment belongs\n\t\t\tto.

" + "smithy.api#documentation": "

The short name or full ARN of the cluster that hosts the container instance the\n\t\t\tattachment belongs to.

" } }, "attachments": { @@ -11953,12 +11959,12 @@ "value": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

The namespaced kernel parameter to set a value for.

\n

Valid IPC namespace values: \"kernel.msgmax\" | \"kernel.msgmnb\" | \"kernel.msgmni\" | \"kernel.sem\"\n\t\t\t\t| \"kernel.shmall\" | \"kernel.shmmax\" | \"kernel.shmmni\" | \"kernel.shm_rmid_forced\", and\n\t\t\t\tSysctls that start with \"fs.mqueue.*\"\n

\n

Valid network namespace values: Sysctls that start with \"net.*\"\n

\n

All of these values are supported by Fargate.

" + "smithy.api#documentation": "

The namespaced kernel parameter to set a value for.

\n

Valid IPC namespace values: \"kernel.msgmax\" | \"kernel.msgmnb\" | \"kernel.msgmni\"\n\t\t\t\t| \"kernel.sem\" | \"kernel.shmall\" | \"kernel.shmmax\" | \"kernel.shmmni\" |\n\t\t\t\t\"kernel.shm_rmid_forced\", and Sysctls that start with\n\t\t\t\t\"fs.mqueue.*\"\n

\n

Valid network namespace values: Sysctls that start with\n\t\t\t\t\"net.*\"\n

\n

All of these values are supported by Fargate.

" } } }, "traits": { - "smithy.api#documentation": "

A list of namespaced kernel parameters to set in the container. This parameter maps to\n\t\t\t\tSysctls in the docker container create command and the --sysctl option to\n\t\t\tdocker run. For example, you can configure net.ipv4.tcp_keepalive_time setting to maintain\n\t\t\tlonger lived connections.

\n

We don't recommend that you specify network-related systemControls parameters for\n\t\t\tmultiple containers in a single task that also uses either the awsvpc or host\n\t\t\tnetwork mode. Doing this has the following disadvantages:

\n
    \n
  • \n

    For tasks that use the awsvpc network mode including Fargate, if you set\n\t\t\t\t\t\tsystemControls for any container, it applies to all containers in the task. If\n\t\t\t\t\tyou set different systemControls for multiple containers in a single task, the\n\t\t\t\t\tcontainer that's started last determines which systemControls take effect.

    \n
  • \n
  • \n

    For tasks that use the host network mode, the network namespace\n\t\t\t\t\t\tsystemControls aren't supported.

    \n
  • \n
\n

If you're setting an IPC resource namespace to use for the containers in the task, the following\n\t\t\tconditions apply to your system controls. For more information, see IPC\n\t\t\t\tmode.

\n
    \n
  • \n

    For tasks that use the host IPC mode, IPC namespace systemControls\n\t\t\t\t\taren't supported.

    \n
  • \n
  • \n

    For tasks that use the task IPC mode, IPC namespace systemControls\n\t\t\t\t\tvalues apply to all containers within a task.

    \n
  • \n
\n \n

This parameter is not supported for Windows containers.

\n
\n \n

This parameter is only supported for tasks that are hosted on\n Fargate if the tasks are using platform version 1.4.0 or later\n (Linux). This isn't supported for Windows containers on\n Fargate.

\n
" + "smithy.api#documentation": "

A list of namespaced kernel parameters to set in the container. This parameter maps to\n\t\t\t\tSysctls in the docker container create command and the\n\t\t\t\t--sysctl option to docker run. For example, you can configure\n\t\t\t\tnet.ipv4.tcp_keepalive_time setting to maintain longer lived\n\t\t\tconnections.

\n

We don't recommend that you specify network-related systemControls\n\t\t\tparameters for multiple containers in a single task that also uses either the\n\t\t\t\tawsvpc or host network mode. Doing this has the following\n\t\t\tdisadvantages:

\n
    \n
  • \n

    For tasks that use the awsvpc network mode including Fargate,\n\t\t\t\t\tif you set systemControls for any container, it applies to all\n\t\t\t\t\tcontainers in the task. If you set different systemControls for\n\t\t\t\t\tmultiple containers in a single task, the container that's started last\n\t\t\t\t\tdetermines which systemControls take effect.

    \n
  • \n
  • \n

    For tasks that use the host network mode, the network namespace\n\t\t\t\t\t\tsystemControls aren't supported.

    \n
  • \n
\n

If you're setting an IPC resource namespace to use for the containers in the task, the\n\t\t\tfollowing conditions apply to your system controls. For more information, see IPC mode.

\n
    \n
  • \n

    For tasks that use the host IPC mode, IPC namespace\n\t\t\t\t\t\tsystemControls aren't supported.

    \n
  • \n
  • \n

    For tasks that use the task IPC mode, IPC namespace\n\t\t\t\t\t\tsystemControls values apply to all containers within a\n\t\t\t\t\ttask.

    \n
  • \n
\n \n

This parameter is not supported for Windows containers.

\n
\n \n

This parameter is only supported for tasks that are hosted on\n Fargate if the tasks are using platform version 1.4.0 or later\n (Linux). This isn't supported for Windows containers on\n Fargate.

\n
" } }, "com.amazonaws.ecs#SystemControls": { @@ -11973,18 +11979,18 @@ "key": { "target": "com.amazonaws.ecs#TagKey", "traits": { - "smithy.api#documentation": "

One part of a key-value pair that make up a tag. A key is a general label that acts like\n\t\t\ta category for more specific tag values.

" + "smithy.api#documentation": "

One part of a key-value pair that make up a tag. A key is a general label\n\t\t\tthat acts like a category for more specific tag values.

" } }, "value": { "target": "com.amazonaws.ecs#TagValue", "traits": { - "smithy.api#documentation": "

The optional part of a key-value pair that make up a tag. A value acts as a descriptor\n\t\t\twithin a tag category (key).

" + "smithy.api#documentation": "

The optional part of a key-value pair that make up a tag. A value acts as\n\t\t\ta descriptor within a tag category (key).

" } } }, "traits": { - "smithy.api#documentation": "

The metadata that you apply to a resource to help you categorize and organize them. Each tag consists\n\t\t\tof a key and an optional value. You define them.

\n

The following basic restrictions apply to tags:

\n
    \n
  • \n

    Maximum number of tags per resource - 50

    \n
  • \n
  • \n

    For each resource, each tag key must be unique, and each tag key can have only\n one value.

    \n
  • \n
  • \n

    Maximum key length - 128 Unicode characters in UTF-8

    \n
  • \n
  • \n

    Maximum value length - 256 Unicode characters in UTF-8

    \n
  • \n
  • \n

    If your tagging schema is used across multiple services and resources,\n remember that other services may have restrictions on allowed characters.\n Generally allowed characters are: letters, numbers, and spaces representable in\n UTF-8, and the following characters: + - = . _ : / @.

    \n
  • \n
  • \n

    Tag keys and values are case-sensitive.

    \n
  • \n
  • \n

    Do not use aws:, AWS:, or any upper or lowercase\n combination of such as a prefix for either keys or values as it is reserved for\n Amazon Web Services use. You cannot edit or delete tag keys or values with this prefix. Tags with\n this prefix do not count against your tags per resource limit.

    \n
  • \n
" + "smithy.api#documentation": "

The metadata that you apply to a resource to help you categorize and organize them.\n\t\t\tEach tag consists of a key and an optional value. You define them.

\n

The following basic restrictions apply to tags:

\n
    \n
  • \n

    Maximum number of tags per resource - 50

    \n
  • \n
  • \n

    For each resource, each tag key must be unique, and each tag key can have only\n one value.

    \n
  • \n
  • \n

    Maximum key length - 128 Unicode characters in UTF-8

    \n
  • \n
  • \n

    Maximum value length - 256 Unicode characters in UTF-8

    \n
  • \n
  • \n

    If your tagging schema is used across multiple services and resources,\n remember that other services may have restrictions on allowed characters.\n Generally allowed characters are: letters, numbers, and spaces representable in\n UTF-8, and the following characters: + - = . _ : / @.

    \n
  • \n
  • \n

    Tag keys and values are case-sensitive.

    \n
  • \n
  • \n

    Do not use aws:, AWS:, or any upper or lowercase\n combination of such as a prefix for either keys or values as it is reserved for\n Amazon Web Services use. You cannot edit or delete tag keys or values with this prefix. Tags with\n this prefix do not count against your tags per resource limit.

    \n
  • \n
" } }, "com.amazonaws.ecs#TagKey": { @@ -12029,7 +12035,7 @@ } ], "traits": { - "smithy.api#documentation": "

Associates the specified tags to a resource with the specified resourceArn. If existing\n\t\t\ttags on a resource aren't specified in the request parameters, they aren't changed. When a resource is\n\t\t\tdeleted, the tags that are associated with that resource are deleted as well.

", + "smithy.api#documentation": "

Associates the specified tags to a resource with the specified\n\t\t\t\tresourceArn. If existing tags on a resource aren't specified in the\n\t\t\trequest parameters, they aren't changed. When a resource is deleted, the tags that are\n\t\t\tassociated with that resource are deleted as well.

", "smithy.api#examples": [ { "title": "To tag a cluster.", @@ -12054,7 +12060,7 @@ "resourceArn": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the resource to add tags to. Currently, the supported resources are Amazon ECS capacity\n\t\t\tproviders, tasks, services, task definitions, clusters, and container instances.

", + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the resource to add tags to. Currently, the supported resources are\n\t\t\tAmazon ECS capacity providers, tasks, services, task definitions, clusters, and container\n\t\t\tinstances.

", "smithy.api#required": {} } }, @@ -12110,7 +12116,7 @@ } }, "traits": { - "smithy.api#documentation": "

The execute command cannot run. This error can be caused by any of the following configuration\n\t\t\tissues:

\n
    \n
  • \n

    Incorrect IAM permissions

    \n
  • \n
  • \n

    The SSM agent is not installed or is not running

    \n
  • \n
  • \n

    There is an interface Amazon VPC endpoint for Amazon ECS, but there is not one for Systems\n\t\t\t\t\tManager Session Manager

    \n
  • \n
\n

For information about how to troubleshoot the issues, see Troubleshooting issues with ECS Exec in\n\t\t\tthe Amazon Elastic Container Service Developer Guide.

", + "smithy.api#documentation": "

The execute command cannot run. This error can be caused by any of the following\n\t\t\tconfiguration issues:

\n
    \n
  • \n

    Incorrect IAM permissions

    \n
  • \n
  • \n

    The SSM agent is not installed or is not running

    \n
  • \n
  • \n

    There is an interface Amazon VPC endpoint for Amazon ECS, but there is not one for\n\t\t\t\t\tSystems Manager Session Manager

    \n
  • \n
\n

For information about how to troubleshoot the issues, see Troubleshooting issues with ECS\n\t\t\t\tExec in the Amazon Elastic Container Service Developer Guide.

", "smithy.api#error": "client" } }, @@ -12125,7 +12131,7 @@ } }, "traits": { - "smithy.api#documentation": "

The specified target wasn't found. You can view your available container instances with ListContainerInstances. Amazon ECS container instances are cluster-specific and\n\t\t\tRegion-specific.

", + "smithy.api#documentation": "

The specified target wasn't found. You can view your available container instances\n\t\t\twith ListContainerInstances. Amazon ECS container instances are cluster-specific and\n\t\t\tRegion-specific.

", "smithy.api#error": "client" } }, @@ -12146,7 +12152,7 @@ "attachments": { "target": "com.amazonaws.ecs#Attachments", "traits": { - "smithy.api#documentation": "

The Elastic Network Adapter that's associated with the task if the task uses the awsvpc\n\t\t\tnetwork mode.

" + "smithy.api#documentation": "

The Elastic Network Adapter that's associated with the task if the task uses the\n\t\t\t\tawsvpc network mode.

" } }, "attributes": { @@ -12182,7 +12188,7 @@ "connectivityAt": { "target": "com.amazonaws.ecs#Timestamp", "traits": { - "smithy.api#documentation": "

The Unix timestamp for the time when the task last went into CONNECTED status.

" + "smithy.api#documentation": "

The Unix timestamp for the time when the task last went into CONNECTED\n\t\t\tstatus.

" } }, "containerInstanceArn": { @@ -12200,26 +12206,26 @@ "cpu": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

The number of CPU units used by the task as expressed in a task definition. It can be expressed as an\n\t\t\tinteger using CPU units (for example, 1024). It can also be expressed as a string using\n\t\t\tvCPUs (for example, 1 vCPU or 1 vcpu). String values are converted to an\n\t\t\tinteger that indicates the CPU units when the task definition is registered.

\n

If you use the EC2 launch type, this field is optional. Supported values are between\n\t\t\t\t128 CPU units (0.125 vCPUs) and 10240 CPU units\n\t\t\t\t(10 vCPUs).

\n

If you use the Fargate launch type, this field is required. You must use one of the\n\t\t\tfollowing values. These values determine the range of supported values for the memory\n\t\t\tparameter:

\n

The CPU units cannot be less than 1 vCPU when you use Windows containers on\n\t\t\tFargate.

\n
    \n
  • \n

    256 (.25 vCPU) - Available memory values: 512 (0.5 GB), 1024 (1 GB), 2048 (2 GB)

    \n
  • \n
  • \n

    512 (.5 vCPU) - Available memory values: 1024 (1 GB), 2048 (2 GB), 3072 (3 GB), 4096 (4 GB)

    \n
  • \n
  • \n

    1024 (1 vCPU) - Available memory values: 2048 (2 GB), 3072 (3 GB), 4096 (4 GB), 5120 (5 GB), 6144 (6 GB), 7168 (7 GB), 8192 (8 GB)

    \n
  • \n
  • \n

    2048 (2 vCPU) - Available memory values: 4096 (4 GB) and 16384 (16 GB) in increments of 1024 (1 GB)

    \n
  • \n
  • \n

    4096 (4 vCPU) - Available memory values: 8192 (8 GB) and 30720 (30 GB) in increments of 1024 (1 GB)

    \n
  • \n
  • \n

    8192 (8 vCPU) - Available memory values: 16 GB and 60 GB in 4 GB increments

    \n

    This option requires Linux platform 1.4.0 or\n later.

    \n
  • \n
  • \n

    16384 (16 vCPU) - Available memory values: 32 GB and 120 GB in 8 GB increments

    \n

    This option requires Linux platform 1.4.0 or\n later.

    \n
  • \n
" + "smithy.api#documentation": "

The number of CPU units used by the task as expressed in a task definition. It can be\n\t\t\texpressed as an integer using CPU units (for example, 1024). It can also be\n\t\t\texpressed as a string using vCPUs (for example, 1 vCPU or 1\n\t\t\t\tvcpu). String values are converted to an integer that indicates the CPU units\n\t\t\twhen the task definition is registered.

\n

If you use the EC2 launch type, this field is optional. Supported values\n\t\t\tare between 128 CPU units (0.125 vCPUs) and 10240\n\t\t\tCPU units (10 vCPUs).

\n

If you use the Fargate launch type, this field is required. You must use\n\t\t\tone of the following values. These values determine the range of supported values for\n\t\t\tthe memory parameter:

\n

The CPU units cannot be less than 1 vCPU when you use Windows containers on\n\t\t\tFargate.

\n
    \n
  • \n

    256 (.25 vCPU) - Available memory values: 512 (0.5 GB), 1024 (1 GB), 2048 (2 GB)

    \n
  • \n
  • \n

    512 (.5 vCPU) - Available memory values: 1024 (1 GB), 2048 (2 GB), 3072 (3 GB), 4096 (4 GB)

    \n
  • \n
  • \n

    1024 (1 vCPU) - Available memory values: 2048 (2 GB), 3072 (3 GB), 4096 (4 GB), 5120 (5 GB), 6144 (6 GB), 7168 (7 GB), 8192 (8 GB)

    \n
  • \n
  • \n

    2048 (2 vCPU) - Available memory values: 4096 (4 GB) and 16384 (16 GB) in increments of 1024 (1 GB)

    \n
  • \n
  • \n

    4096 (4 vCPU) - Available memory values: 8192 (8 GB) and 30720 (30 GB) in increments of 1024 (1 GB)

    \n
  • \n
  • \n

    8192 (8 vCPU) - Available memory values: 16 GB and 60 GB in 4 GB increments

    \n

    This option requires Linux platform 1.4.0 or\n later.

    \n
  • \n
  • \n

    16384 (16 vCPU) - Available memory values: 32 GB and 120 GB in 8 GB increments

    \n

    This option requires Linux platform 1.4.0 or\n later.

    \n
  • \n
" } }, "createdAt": { "target": "com.amazonaws.ecs#Timestamp", "traits": { - "smithy.api#documentation": "

The Unix timestamp for the time when the task was created. More specifically, it's for the time when\n\t\t\tthe task entered the PENDING state.

" + "smithy.api#documentation": "

The Unix timestamp for the time when the task was created. More specifically, it's for\n\t\t\tthe time when the task entered the PENDING state.

" } }, "desiredStatus": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

The desired status of the task. For more information, see Task Lifecycle.

" + "smithy.api#documentation": "

The desired status of the task. For more information, see Task\n\t\t\tLifecycle.

" } }, "enableExecuteCommand": { "target": "com.amazonaws.ecs#Boolean", "traits": { "smithy.api#default": false, - "smithy.api#documentation": "

Determines whether execute command functionality is turned on for this task. If true,\n\t\t\texecute command functionality is turned on all the containers in the task.

" + "smithy.api#documentation": "

Determines whether execute command functionality is turned on for this task. If\n\t\t\t\ttrue, execute command functionality is turned on all the containers in\n\t\t\tthe task.

" } }, "executionStoppedAt": { @@ -12237,7 +12243,7 @@ "healthStatus": { "target": "com.amazonaws.ecs#HealthStatus", "traits": { - "smithy.api#documentation": "

The health status for the task. It's determined by the health of the essential containers in the\n\t\t\ttask. If all essential containers in the task are reporting as HEALTHY, the task status\n\t\t\talso reports as HEALTHY. If any essential containers in the task are reporting as\n\t\t\t\tUNHEALTHY or UNKNOWN, the task status also reports as\n\t\t\t\tUNHEALTHY or UNKNOWN.

\n \n

The Amazon ECS container agent doesn't monitor or report on Docker health checks that are embedded in\n\t\t\t\ta container image and not specified in the container definition. For example, this includes those\n\t\t\t\tspecified in a parent image or from the image's Dockerfile. Health check parameters that are\n\t\t\t\tspecified in a container definition override any Docker health checks that are found in the\n\t\t\t\tcontainer image.

\n
" + "smithy.api#documentation": "

The health status for the task. It's determined by the health of the essential\n\t\t\tcontainers in the task. If all essential containers in the task are reporting as\n\t\t\t\tHEALTHY, the task status also reports as HEALTHY. If any\n\t\t\tessential containers in the task are reporting as UNHEALTHY or\n\t\t\t\tUNKNOWN, the task status also reports as UNHEALTHY or\n\t\t\t\tUNKNOWN.

\n \n

The Amazon ECS container agent doesn't monitor or report on Docker health checks that\n\t\t\t\tare embedded in a container image and not specified in the container definition. For\n\t\t\t\texample, this includes those specified in a parent image or from the image's\n\t\t\t\tDockerfile. Health check parameters that are specified in a container definition\n\t\t\t\toverride any Docker health checks that are found in the container image.

\n
" } }, "inferenceAccelerators": { @@ -12249,19 +12255,19 @@ "lastStatus": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

The last known status for the task. For more information, see Task Lifecycle.

" + "smithy.api#documentation": "

The last known status for the task. For more information, see Task\n\t\t\t\tLifecycle.

" } }, "launchType": { "target": "com.amazonaws.ecs#LaunchType", "traits": { - "smithy.api#documentation": "

The infrastructure where your task runs on. For more information, see Amazon ECS launch\n\t\t\t\ttypes in the Amazon Elastic Container Service Developer Guide.

" + "smithy.api#documentation": "

The infrastructure where your task runs on. For more information, see Amazon ECS\n\t\t\t\tlaunch types in the Amazon Elastic Container Service Developer Guide.

" } }, "memory": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

The amount of memory (in MiB) that the task uses as expressed in a task definition. It can be\n\t\t\texpressed as an integer using MiB (for example, 1024). If it's expressed as a string using\n\t\t\tGB (for example, 1GB or 1 GB), it's converted to an integer indicating the\n\t\t\tMiB when the task definition is registered.

\n

If you use the EC2 launch type, this field is optional.

\n

If you use the Fargate launch type, this field is required. You must use one of the\n\t\t\tfollowing values. The value that you choose determines the range of supported values for the\n\t\t\t\tcpu parameter.

\n
    \n
  • \n

    512 (0.5 GB), 1024 (1 GB), 2048 (2 GB) - Available cpu values: 256 (.25 vCPU)

    \n
  • \n
  • \n

    1024 (1 GB), 2048 (2 GB), 3072 (3 GB), 4096 (4 GB) - Available cpu values: 512 (.5 vCPU)

    \n
  • \n
  • \n

    2048 (2 GB), 3072 (3 GB), 4096 (4 GB), 5120 (5 GB), 6144 (6 GB), 7168 (7 GB), 8192 (8 GB) - Available cpu values: 1024 (1 vCPU)

    \n
  • \n
  • \n

    Between 4096 (4 GB) and 16384 (16 GB) in increments of 1024 (1 GB) - Available cpu values: 2048 (2 vCPU)

    \n
  • \n
  • \n

    Between 8192 (8 GB) and 30720 (30 GB) in increments of 1024 (1 GB) - Available cpu values: 4096 (4 vCPU)

    \n
  • \n
  • \n

    Between 16 GB and 60 GB in 4 GB increments - Available cpu values: 8192 (8 vCPU)

    \n

    This option requires Linux platform 1.4.0 or\n later.

    \n
  • \n
  • \n

    Between 32GB and 120 GB in 8 GB increments - Available cpu values: 16384 (16 vCPU)

    \n

    This option requires Linux platform 1.4.0 or\n later.

    \n
  • \n
" + "smithy.api#documentation": "

The amount of memory (in MiB) that the task uses as expressed in a task definition. It\n\t\t\tcan be expressed as an integer using MiB (for example, 1024). If it's\n\t\t\texpressed as a string using GB (for example, 1GB or 1 GB),\n\t\t\tit's converted to an integer indicating the MiB when the task definition is\n\t\t\tregistered.

\n

If you use the EC2 launch type, this field is optional.

\n

If you use the Fargate launch type, this field is required. You must use\n\t\t\tone of the following values. The value that you choose determines the range of supported\n\t\t\tvalues for the cpu parameter.

\n
    \n
  • \n

    512 (0.5 GB), 1024 (1 GB), 2048 (2 GB) - Available cpu values: 256 (.25 vCPU)

    \n
  • \n
  • \n

    1024 (1 GB), 2048 (2 GB), 3072 (3 GB), 4096 (4 GB) - Available cpu values: 512 (.5 vCPU)

    \n
  • \n
  • \n

    2048 (2 GB), 3072 (3 GB), 4096 (4 GB), 5120 (5 GB), 6144 (6 GB), 7168 (7 GB), 8192 (8 GB) - Available cpu values: 1024 (1 vCPU)

    \n
  • \n
  • \n

    Between 4096 (4 GB) and 16384 (16 GB) in increments of 1024 (1 GB) - Available cpu values: 2048 (2 vCPU)

    \n
  • \n
  • \n

    Between 8192 (8 GB) and 30720 (30 GB) in increments of 1024 (1 GB) - Available cpu values: 4096 (4 vCPU)

    \n
  • \n
  • \n

    Between 16 GB and 60 GB in 4 GB increments - Available cpu values: 8192 (8 vCPU)

    \n

    This option requires Linux platform 1.4.0 or\n later.

    \n
  • \n
  • \n

    Between 32 GB and 120 GB in 8 GB increments - Available cpu values: 16384 (16 vCPU)

    \n

    This option requires Linux platform 1.4.0 or\n later.

    \n
  • \n
" } }, "overrides": { @@ -12273,13 +12279,13 @@ "platformVersion": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

The platform version where your task runs on. A platform version is only specified for tasks that use\n\t\t\tthe Fargate launch type. If you didn't specify one, the LATEST platform\n\t\t\tversion is used. For more information, see Fargate Platform Versions in\n\t\t\tthe Amazon Elastic Container Service Developer Guide.

" + "smithy.api#documentation": "

The platform version where your task runs on. A platform version is only specified for\n\t\t\ttasks that use the Fargate launch type. If you didn't specify one, the\n\t\t\t\tLATEST platform version is used. For more information, see Fargate Platform Versions in the Amazon Elastic Container Service Developer Guide.

" } }, "platformFamily": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

The operating system that your tasks are running on. A platform family is specified only for tasks\n\t\t\tthat use the Fargate launch type.

\n

All tasks that run as part of this service must use the same platformFamily value as\n\t\t\tthe service (for example, LINUX.).

" + "smithy.api#documentation": "

The operating system that your tasks are running on. A platform family is specified\n\t\t\tonly for tasks that use the Fargate launch type.

\n

All tasks that run as part of this service must use the same\n\t\t\t\tplatformFamily value as the service (for example,\n\t\t\tLINUX.).

" } }, "pullStartedAt": { @@ -12297,7 +12303,7 @@ "startedAt": { "target": "com.amazonaws.ecs#Timestamp", "traits": { - "smithy.api#documentation": "

The Unix timestamp for the time when the task started. More specifically, it's for the time when the\n\t\t\ttask transitioned from the PENDING state to the RUNNING state.

" + "smithy.api#documentation": "

The Unix timestamp for the time when the task started. More specifically, it's for the\n\t\t\ttime when the task transitioned from the PENDING state to the\n\t\t\t\tRUNNING state.

" } }, "startedBy": { @@ -12309,13 +12315,13 @@ "stopCode": { "target": "com.amazonaws.ecs#TaskStopCode", "traits": { - "smithy.api#documentation": "

The stop code indicating why a task was stopped. The stoppedReason might contain\n\t\t\tadditional details.

\n

For more information about stop code, see Stopped tasks error\n\t\t\t\tcodes in the Amazon ECS Developer Guide.

" + "smithy.api#documentation": "

The stop code indicating why a task was stopped. The stoppedReason might\n\t\t\tcontain additional details.

\n

For more information about stop code, see Stopped tasks\n\t\t\t\terror codes in the Amazon ECS Developer Guide.

" } }, "stoppedAt": { "target": "com.amazonaws.ecs#Timestamp", "traits": { - "smithy.api#documentation": "

The Unix timestamp for the time when the task was stopped. More specifically, it's for the time when\n\t\t\tthe task transitioned from the RUNNING state to the STOPPED state.

" + "smithy.api#documentation": "

The Unix timestamp for the time when the task was stopped. More specifically, it's for\n\t\t\tthe time when the task transitioned from the RUNNING state to the\n\t\t\t\tSTOPPED state.

" } }, "stoppedReason": { @@ -12327,13 +12333,13 @@ "stoppingAt": { "target": "com.amazonaws.ecs#Timestamp", "traits": { - "smithy.api#documentation": "

The Unix timestamp for the time when the task stops. More specifically, it's for the time when the\n\t\t\ttask transitions from the RUNNING state to STOPPING.

" + "smithy.api#documentation": "

The Unix timestamp for the time when the task stops. More specifically, it's for the\n\t\t\ttime when the task transitions from the RUNNING state to\n\t\t\t\tSTOPPING.

" } }, "tags": { "target": "com.amazonaws.ecs#Tags", "traits": { - "smithy.api#documentation": "

The metadata that you apply to the task to help you categorize and organize the task. Each tag\n\t\t\tconsists of a key and an optional value. You define both the key and value.

\n

The following basic restrictions apply to tags:

\n
    \n
  • \n

    Maximum number of tags per resource - 50

    \n
  • \n
  • \n

    For each resource, each tag key must be unique, and each tag key can have only\n one value.

    \n
  • \n
  • \n

    Maximum key length - 128 Unicode characters in UTF-8

    \n
  • \n
  • \n

    Maximum value length - 256 Unicode characters in UTF-8

    \n
  • \n
  • \n

    If your tagging schema is used across multiple services and resources,\n remember that other services may have restrictions on allowed characters.\n Generally allowed characters are: letters, numbers, and spaces representable in\n UTF-8, and the following characters: + - = . _ : / @.

    \n
  • \n
  • \n

    Tag keys and values are case-sensitive.

    \n
  • \n
  • \n

    Do not use aws:, AWS:, or any upper or lowercase\n combination of such as a prefix for either keys or values as it is reserved for\n Amazon Web Services use. You cannot edit or delete tag keys or values with this prefix. Tags with\n this prefix do not count against your tags per resource limit.

    \n
  • \n
" + "smithy.api#documentation": "

The metadata that you apply to the task to help you categorize and organize the task.\n\t\t\tEach tag consists of a key and an optional value. You define both the key and\n\t\t\tvalue.

\n

The following basic restrictions apply to tags:

\n
    \n
  • \n

    Maximum number of tags per resource - 50

    \n
  • \n
  • \n

    For each resource, each tag key must be unique, and each tag key can have only\n one value.

    \n
  • \n
  • \n

    Maximum key length - 128 Unicode characters in UTF-8

    \n
  • \n
  • \n

    Maximum value length - 256 Unicode characters in UTF-8

    \n
  • \n
  • \n

    If your tagging schema is used across multiple services and resources,\n remember that other services may have restrictions on allowed characters.\n Generally allowed characters are: letters, numbers, and spaces representable in\n UTF-8, and the following characters: + - = . _ : / @.

    \n
  • \n
  • \n

    Tag keys and values are case-sensitive.

    \n
  • \n
  • \n

    Do not use aws:, AWS:, or any upper or lowercase\n combination of such as a prefix for either keys or values as it is reserved for\n Amazon Web Services use. You cannot edit or delete tag keys or values with this prefix. Tags with\n this prefix do not count against your tags per resource limit.

    \n
  • \n
" } }, "taskArn": { @@ -12352,7 +12358,7 @@ "target": "com.amazonaws.ecs#Long", "traits": { "smithy.api#default": 0, - "smithy.api#documentation": "

The version counter for the task. Every time a task experiences a change that starts a CloudWatch event,\n\t\t\tthe version counter is incremented. If you replicate your Amazon ECS task state with CloudWatch Events, you can\n\t\t\tcompare the version of a task reported by the Amazon ECS API actions with the version reported in CloudWatch\n\t\t\tEvents for the task (inside the detail object) to verify that the version in your event\n\t\t\tstream is current.

" + "smithy.api#documentation": "

The version counter for the task. Every time a task experiences a change that starts a\n\t\t\tCloudWatch event, the version counter is incremented. If you replicate your Amazon ECS task state\n\t\t\twith CloudWatch Events, you can compare the version of a task reported by the Amazon ECS API\n\t\t\tactions with the version reported in CloudWatch Events for the task (inside the\n\t\t\t\tdetail object) to verify that the version in your event stream is\n\t\t\tcurrent.

" } }, "ephemeralStorage": { @@ -12384,19 +12390,19 @@ "containerDefinitions": { "target": "com.amazonaws.ecs#ContainerDefinitions", "traits": { - "smithy.api#documentation": "

A list of container definitions in JSON format that describe the different containers that make up\n\t\t\tyour task. For more information about container definition parameters and defaults, see Amazon ECS Task\n\t\t\t\tDefinitions in the Amazon Elastic Container Service Developer Guide.

" + "smithy.api#documentation": "

A list of container definitions in JSON format that describe the different containers\n\t\t\tthat make up your task. For more information about container definition parameters and\n\t\t\tdefaults, see Amazon ECS Task\n\t\t\t\tDefinitions in the Amazon Elastic Container Service Developer Guide.

" } }, "family": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

The name of a family that this task definition is registered to. Up to 255 characters are allowed.\n\t\t\tLetters (both uppercase and lowercase letters), numbers, hyphens (-), and underscores (_) are\n\t\t\tallowed.

\n

A family groups multiple versions of a task definition. Amazon ECS gives the first task definition that\n\t\t\tyou registered to a family a revision number of 1. Amazon ECS gives sequential revision numbers to each task\n\t\t\tdefinition that you add.

" + "smithy.api#documentation": "

The name of a family that this task definition is registered to. Up to 255 characters\n\t\t\tare allowed. Letters (both uppercase and lowercase letters), numbers, hyphens (-), and\n\t\t\tunderscores (_) are allowed.

\n

A family groups multiple versions of a task definition. Amazon ECS gives the first task\n\t\t\tdefinition that you registered to a family a revision number of 1. Amazon ECS gives\n\t\t\tsequential revision numbers to each task definition that you add.

" } }, "taskRoleArn": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

The short name or full Amazon Resource Name (ARN) of the Identity and Access Management role that grants containers in the task permission\n\t\t\tto call Amazon Web Services APIs on your behalf. For informationabout the required IAM roles for Amazon ECS, see IAM roles for Amazon ECS in the Amazon Elastic Container Service Developer Guide.

" + "smithy.api#documentation": "

The short name or full Amazon Resource Name (ARN) of the Identity and Access Management role that grants containers in the\n\t\t\ttask permission to call Amazon Web Services APIs on your behalf. For information about the required\n\t\t\tIAM roles for Amazon ECS, see IAM\n\t\t\t\troles for Amazon ECS in the Amazon Elastic Container Service Developer Guide.

" } }, "executionRoleArn": { @@ -12415,13 +12421,13 @@ "target": "com.amazonaws.ecs#Integer", "traits": { "smithy.api#default": 0, - "smithy.api#documentation": "

The revision of the task in a particular family. The revision is a version number of a task\n\t\t\tdefinition in a family. When you register a task definition for the first time, the revision is\n\t\t\t\t1. Each time that you register a new revision of a task definition in the same family,\n\t\t\tthe revision value always increases by one. This is even if you deregistered previous revisions in this\n\t\t\tfamily.

" + "smithy.api#documentation": "

The revision of the task in a particular family. The revision is a version number of a\n\t\t\ttask definition in a family. When you register a task definition for the first time, the\n\t\t\trevision is 1. Each time that you register a new revision of a task\n\t\t\tdefinition in the same family, the revision value always increases by one. This is even\n\t\t\tif you deregistered previous revisions in this family.

" } }, "volumes": { "target": "com.amazonaws.ecs#VolumeList", "traits": { - "smithy.api#documentation": "

The list of data volume definitions for the task. For more information, see Using data\n\t\t\t\tvolumes in tasks in the Amazon Elastic Container Service Developer Guide.

\n \n

The host and sourcePath parameters aren't supported for tasks run on\n\t\t\t\tFargate.

\n
" + "smithy.api#documentation": "

The list of data volume definitions for the task. For more information, see Using data volumes in tasks in the Amazon Elastic Container Service Developer Guide.

\n \n

The host and sourcePath parameters aren't supported for\n\t\t\t\ttasks run on Fargate.

\n
" } }, "status": { @@ -12433,7 +12439,7 @@ "requiresAttributes": { "target": "com.amazonaws.ecs#RequiresAttributes", "traits": { - "smithy.api#documentation": "

The container instance attributes required by your task. When an Amazon EC2 instance is registered to your\n\t\t\tcluster, the Amazon ECS container agent assigns some standard attributes to the instance. You can apply\n\t\t\tcustom attributes. These are specified as key-value pairs using the Amazon ECS console or the PutAttributes API. These attributes are used when determining task placement for tasks\n\t\t\thosted on Amazon EC2 instances. For more information, see Attributes\n\t\t\tin the Amazon Elastic Container Service Developer Guide.

\n \n

This parameter isn't supported for tasks run on Fargate.

\n
" + "smithy.api#documentation": "

The container instance attributes required by your task. When an Amazon EC2 instance is\n\t\t\tregistered to your cluster, the Amazon ECS container agent assigns some standard attributes\n\t\t\tto the instance. You can apply custom attributes. These are specified as key-value pairs\n\t\t\tusing the Amazon ECS console or the PutAttributes\n\t\t\tAPI. These attributes are used when determining task placement for tasks hosted on Amazon EC2\n\t\t\tinstances. For more information, see Attributes in the Amazon Elastic Container Service Developer Guide.

\n \n

This parameter isn't supported for tasks run on Fargate.

\n
" } }, "placementConstraints": { @@ -12445,31 +12451,31 @@ "compatibilities": { "target": "com.amazonaws.ecs#CompatibilityList", "traits": { - "smithy.api#documentation": "

Amazon ECS validates the task definition parameters with those supported by the launch type. For more\n\t\t\tinformation, see Amazon ECS launch types in the Amazon Elastic Container Service Developer Guide.

" + "smithy.api#documentation": "

Amazon ECS validates the task definition parameters with those supported by the launch\n\t\t\ttype. For more information, see Amazon ECS launch types\n\t\t\tin the Amazon Elastic Container Service Developer Guide.

" } }, "runtimePlatform": { "target": "com.amazonaws.ecs#RuntimePlatform", "traits": { - "smithy.api#documentation": "

The operating system that your task definitions are running on. A platform family is specified only\n\t\t\tfor tasks using the Fargate launch type.

\n

When you specify a task in a service, this value must match the runtimePlatform value of\n\t\t\tthe service.

" + "smithy.api#documentation": "

The operating system that your task definitions are running on. A platform family is\n\t\t\tspecified only for tasks using the Fargate launch type.

\n

When you specify a task in a service, this value must match the\n\t\t\t\truntimePlatform value of the service.

" } }, "requiresCompatibilities": { "target": "com.amazonaws.ecs#CompatibilityList", "traits": { - "smithy.api#documentation": "

The task launch types the task definition was validated against. The valid values are\n\t\t\t\tEC2, FARGATE, and EXTERNAL. For more information, see Amazon ECS launch\n\t\t\t\ttypes in the Amazon Elastic Container Service Developer Guide.

" + "smithy.api#documentation": "

The task launch types the task definition was validated against. The valid values are\n\t\t\t\tEC2, FARGATE, and EXTERNAL. For more\n\t\t\tinformation, see Amazon ECS launch types\n\t\t\tin the Amazon Elastic Container Service Developer Guide.

" } }, "cpu": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

The number of cpu units used by the task. If you use the EC2 launch type, this field is\n\t\t\toptional. Any value can be used. If you use the Fargate launch type, this field is required. You must\n\t\t\tuse one of the following values. The value that you choose determines your range of valid values for\n\t\t\tthe memory parameter.

\n

If you use the EC2 launch type, this field is optional. Supported values are between\n\t\t\t\t128 CPU units (0.125 vCPUs) and 10240 CPU units\n\t\t\t\t(10 vCPUs).

\n

The CPU units cannot be less than 1 vCPU when you use Windows containers on\n\t\t\tFargate.

\n
    \n
  • \n

    256 (.25 vCPU) - Available memory values: 512 (0.5 GB), 1024 (1 GB), 2048 (2 GB)

    \n
  • \n
  • \n

    512 (.5 vCPU) - Available memory values: 1024 (1 GB), 2048 (2 GB), 3072 (3 GB), 4096 (4 GB)

    \n
  • \n
  • \n

    1024 (1 vCPU) - Available memory values: 2048 (2 GB), 3072 (3 GB), 4096 (4 GB), 5120 (5 GB), 6144 (6 GB), 7168 (7 GB), 8192 (8 GB)

    \n
  • \n
  • \n

    2048 (2 vCPU) - Available memory values: 4096 (4 GB) and 16384 (16 GB) in increments of 1024 (1 GB)

    \n
  • \n
  • \n

    4096 (4 vCPU) - Available memory values: 8192 (8 GB) and 30720 (30 GB) in increments of 1024 (1 GB)

    \n
  • \n
  • \n

    8192 (8 vCPU) - Available memory values: 16 GB and 60 GB in 4 GB increments

    \n

    This option requires Linux platform 1.4.0 or\n later.

    \n
  • \n
  • \n

    16384 (16vCPU) - Available memory values: 32GB and 120 GB in 8 GB increments

    \n

    This option requires Linux platform 1.4.0 or\n later.

    \n
  • \n
" + "smithy.api#documentation": "

The number of cpu units used by the task. If you use the EC2 launch type,\n\t\t\tthis field is optional. Any value can be used. If you use the Fargate launch type, this\n\t\t\tfield is required. You must use one of the following values. The value that you choose\n\t\t\tdetermines your range of valid values for the memory parameter.

\n

If you use the EC2 launch type, this field is optional. Supported values\n\t\t\tare between 128 CPU units (0.125 vCPUs) and 10240\n\t\t\tCPU units (10 vCPUs).

\n

The CPU units cannot be less than 1 vCPU when you use Windows containers on\n\t\t\tFargate.

\n
    \n
  • \n

    256 (.25 vCPU) - Available memory values: 512 (0.5 GB), 1024 (1 GB), 2048 (2 GB)

    \n
  • \n
  • \n

    512 (.5 vCPU) - Available memory values: 1024 (1 GB), 2048 (2 GB), 3072 (3 GB), 4096 (4 GB)

    \n
  • \n
  • \n

    1024 (1 vCPU) - Available memory values: 2048 (2 GB), 3072 (3 GB), 4096 (4 GB), 5120 (5 GB), 6144 (6 GB), 7168 (7 GB), 8192 (8 GB)

    \n
  • \n
  • \n

    2048 (2 vCPU) - Available memory values: 4096 (4 GB) and 16384 (16 GB) in increments of 1024 (1 GB)

    \n
  • \n
  • \n

    4096 (4 vCPU) - Available memory values: 8192 (8 GB) and 30720 (30 GB) in increments of 1024 (1 GB)

    \n
  • \n
  • \n

    8192 (8 vCPU) - Available memory values: 16 GB and 60 GB in 4 GB increments

    \n

    This option requires Linux platform 1.4.0 or\n later.

    \n
  • \n
  • \n

    16384 (16 vCPU) - Available memory values: 32 GB and 120 GB in 8 GB increments

    \n

    This option requires Linux platform 1.4.0 or\n later.

    \n
  • \n
" } }, "memory": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

The amount (in MiB) of memory used by the task.

\n

If your tasks runs on Amazon EC2 instances, you must specify either a task-level memory value or a\n\t\t\tcontainer-level memory value. This field is optional and any value can be used. If a task-level memory\n\t\t\tvalue is specified, the container-level memory value is optional. For more information regarding\n\t\t\tcontainer-level memory and memory reservation, see ContainerDefinition.

\n

If your tasks runs on Fargate, this field is required. You must use one of the following values.\n\t\t\tThe value you choose determines your range of valid values for the cpu parameter.

\n
    \n
  • \n

    512 (0.5 GB), 1024 (1 GB), 2048 (2 GB) - Available cpu values: 256 (.25 vCPU)

    \n
  • \n
  • \n

    1024 (1 GB), 2048 (2 GB), 3072 (3 GB), 4096 (4 GB) - Available cpu values: 512 (.5 vCPU)

    \n
  • \n
  • \n

    2048 (2 GB), 3072 (3 GB), 4096 (4 GB), 5120 (5 GB), 6144 (6 GB), 7168 (7 GB), 8192 (8 GB) - Available cpu values: 1024 (1 vCPU)

    \n
  • \n
  • \n

    Between 4096 (4 GB) and 16384 (16 GB) in increments of 1024 (1 GB) - Available cpu values: 2048 (2 vCPU)

    \n
  • \n
  • \n

    Between 8192 (8 GB) and 30720 (30 GB) in increments of 1024 (1 GB) - Available cpu values: 4096 (4 vCPU)

    \n
  • \n
  • \n

    Between 16 GB and 60 GB in 4 GB increments - Available cpu values: 8192 (8 vCPU)

    \n

    This option requires Linux platform 1.4.0 or\n later.

    \n
  • \n
  • \n

    Between 32GB and 120 GB in 8 GB increments - Available cpu values: 16384 (16 vCPU)

    \n

    This option requires Linux platform 1.4.0 or\n later.

    \n
  • \n
" + "smithy.api#documentation": "

The amount (in MiB) of memory used by the task.

\n

If your tasks run on Amazon EC2 instances, you must specify either a task-level memory\n\t\t\tvalue or a container-level memory value. This field is optional and any value can be\n\t\t\tused. If a task-level memory value is specified, the container-level memory value is\n\t\t\toptional. For more information regarding container-level memory and memory reservation,\n\t\t\tsee ContainerDefinition.

\n

If your tasks run on Fargate, this field is required. You must use one of the\n\t\t\tfollowing values. The value you choose determines your range of valid values for the\n\t\t\t\tcpu parameter.

\n
    \n
  • \n

    512 (0.5 GB), 1024 (1 GB), 2048 (2 GB) - Available cpu values: 256 (.25 vCPU)

    \n
  • \n
  • \n

    1024 (1 GB), 2048 (2 GB), 3072 (3 GB), 4096 (4 GB) - Available cpu values: 512 (.5 vCPU)

    \n
  • \n
  • \n

    2048 (2 GB), 3072 (3 GB), 4096 (4 GB), 5120 (5 GB), 6144 (6 GB), 7168 (7 GB), 8192 (8 GB) - Available cpu values: 1024 (1 vCPU)

    \n
  • \n
  • \n

    Between 4096 (4 GB) and 16384 (16 GB) in increments of 1024 (1 GB) - Available cpu values: 2048 (2 vCPU)

    \n
  • \n
  • \n

    Between 8192 (8 GB) and 30720 (30 GB) in increments of 1024 (1 GB) - Available cpu values: 4096 (4 vCPU)

    \n
  • \n
  • \n

    Between 16 GB and 60 GB in 4 GB increments - Available cpu values: 8192 (8 vCPU)

    \n

    This option requires Linux platform 1.4.0 or\n later.

    \n
  • \n
  • \n

    Between 32 GB and 120 GB in 8 GB increments - Available cpu values: 16384 (16 vCPU)

    \n

    This option requires Linux platform 1.4.0 or\n later.

    \n
  • \n
" } }, "inferenceAccelerators": { @@ -12493,7 +12499,7 @@ "proxyConfiguration": { "target": "com.amazonaws.ecs#ProxyConfiguration", "traits": { - "smithy.api#documentation": "

The configuration details for the App Mesh proxy.

\n

Your Amazon ECS container instances require at least version 1.26.0 of the container agent and at least\n\t\t\tversion 1.26.0-1 of the ecs-init package to use a proxy configuration. If your container\n\t\t\tinstances are launched from the Amazon ECS optimized AMI version 20190301 or later, they\n\t\t\tcontain the required versions of the container agent and ecs-init. For more information,\n\t\t\tsee Amazon ECS-optimized Linux AMI in the Amazon Elastic Container Service Developer Guide.

" + "smithy.api#documentation": "

The configuration details for the App Mesh proxy.

\n

Your Amazon ECS container instances require at least version 1.26.0 of the container agent\n\t\t\tand at least version 1.26.0-1 of the ecs-init package to use a proxy\n\t\t\tconfiguration. If your container instances are launched from the Amazon ECS optimized AMI\n\t\t\tversion 20190301 or later, they contain the required versions of the\n\t\t\tcontainer agent and ecs-init. For more information, see Amazon ECS-optimized Linux AMI in the Amazon Elastic Container Service Developer Guide.

" } }, "registeredAt": { @@ -12519,10 +12525,16 @@ "traits": { "smithy.api#documentation": "

The ephemeral storage settings to use for tasks run with the task definition.

" } + }, + "enableFaultInjection": { + "target": "com.amazonaws.ecs#BoxedBoolean", + "traits": { + "smithy.api#documentation": "

Enables fault injection and allows for fault injection requests to be accepted from the task's containers. \n\t\t\tThe default value is false.

" + } } }, "traits": { - "smithy.api#documentation": "

The details of a task definition which describes the container and volume definitions of an Amazon Elastic Container Service\n\t\t\ttask. You can specify which Docker images to use, the required resources, and other configurations\n\t\t\trelated to launching the task definition through an Amazon ECS service or task.

" + "smithy.api#documentation": "

The details of a task definition which describes the container and volume definitions\n\t\t\tof an Amazon Elastic Container Service task. You can specify which Docker images to use, the required\n\t\t\tresources, and other configurations related to launching the task definition through an\n\t\t\tAmazon ECS service or task.

" } }, "com.amazonaws.ecs#TaskDefinitionFamilyStatus": { @@ -12577,18 +12589,18 @@ "type": { "target": "com.amazonaws.ecs#TaskDefinitionPlacementConstraintType", "traits": { - "smithy.api#documentation": "

The type of constraint. The MemberOf constraint restricts selection to be from a group\n\t\t\tof valid candidates.

" + "smithy.api#documentation": "

The type of constraint. The MemberOf constraint restricts selection to be\n\t\t\tfrom a group of valid candidates.

" } }, "expression": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

A cluster query language expression to apply to the constraint. For more information, see Cluster\n\t\t\t\tquery language in the Amazon Elastic Container Service Developer Guide.

" + "smithy.api#documentation": "

A cluster query language expression to apply to the constraint. For more information,\n\t\t\tsee Cluster query language in the Amazon Elastic Container Service Developer Guide.

" } } }, "traits": { - "smithy.api#documentation": "

The constraint on task placement in the task definition. For more information, see Task\n\t\t\t\tplacement constraints in the Amazon Elastic Container Service Developer Guide.

\n \n

Task placement constraints aren't supported for tasks run on Fargate.

\n
" + "smithy.api#documentation": "

The constraint on task placement in the task definition. For more information, see\n\t\t\t\tTask placement constraints in the\n\t\t\tAmazon Elastic Container Service Developer Guide.

\n \n

Task placement constraints aren't supported for tasks run on Fargate.

\n
" } }, "com.amazonaws.ecs#TaskDefinitionPlacementConstraintType": { @@ -12638,13 +12650,13 @@ "target": "com.amazonaws.ecs#Integer", "traits": { "smithy.api#default": 0, - "smithy.api#documentation": "

The total amount, in GiB, of the ephemeral storage to set for the task. The minimum supported value\n\t\t\tis 20 GiB and the maximum supported value is\u2028 200 GiB.

" + "smithy.api#documentation": "

The total amount, in GiB, of the ephemeral storage to set for the task. The minimum\n\t\t\tsupported value is 20 GiB and the maximum supported value is\u2028\n\t\t\t\t200 GiB.

" } }, "kmsKeyId": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

Specify an Key Management Service key ID to encrypt the ephemeral storage for the task.

" + "smithy.api#documentation": "

Specify an Key Management Service key ID to encrypt the ephemeral storage for the\n\t\t\ttask.

" } } }, @@ -12704,73 +12716,73 @@ "encrypted": { "target": "com.amazonaws.ecs#BoxedBoolean", "traits": { - "smithy.api#documentation": "

Indicates whether the volume should be encrypted. If no value is specified, encryption is turned on\n\t\t\tby default. This parameter maps 1:1 with the Encrypted parameter of the CreateVolume\n\t\t\t\tAPI in the Amazon EC2 API Reference.

" + "smithy.api#documentation": "

Indicates whether the volume should be encrypted. If no value is specified, encryption\n\t\t\tis turned on by default. This parameter maps 1:1 with the Encrypted\n\t\t\tparameter of the CreateVolume API in\n\t\t\tthe Amazon EC2 API Reference.

" } }, "kmsKeyId": { "target": "com.amazonaws.ecs#EBSKMSKeyId", "traits": { - "smithy.api#documentation": "

The Amazon Resource Name (ARN) identifier of the Amazon Web Services Key Management Service key to use for Amazon EBS encryption. When encryption is turned\n\t\t\ton and no Amazon Web Services Key Management Service key is specified, the default Amazon Web Services managed key for Amazon EBS volumes is used. This\n\t\t\tparameter maps 1:1 with the KmsKeyId parameter of the CreateVolume API in the\n\t\t\t\tAmazon EC2 API Reference.

\n \n

Amazon Web Services authenticates the Amazon Web Services Key Management Service key asynchronously. Therefore, if you specify an ID, alias, or\n\t\t\t\tARN that is invalid, the action can appear to complete, but eventually fails.

\n
" + "smithy.api#documentation": "

The Amazon Resource Name (ARN) identifier of the Amazon Web Services Key Management Service key to use for Amazon EBS encryption. When\n\t\t\tencryption is turned on and no Amazon Web Services Key Management Service key is specified, the default Amazon Web Services managed key\n\t\t\tfor Amazon EBS volumes is used. This parameter maps 1:1 with the KmsKeyId\n\t\t\tparameter of the CreateVolume API in\n\t\t\tthe Amazon EC2 API Reference.

\n \n

Amazon Web Services authenticates the Amazon Web Services Key Management Service key asynchronously. Therefore, if you specify an\n\t\t\t\tID, alias, or ARN that is invalid, the action can appear to complete, but\n\t\t\t\teventually fails.

\n
" } }, "volumeType": { "target": "com.amazonaws.ecs#EBSVolumeType", "traits": { - "smithy.api#documentation": "

The volume type. This parameter maps 1:1 with the VolumeType parameter of the CreateVolume\n\t\t\t\tAPI in the Amazon EC2 API Reference. For more information, see Amazon EBS volume types\n\t\t\tin the Amazon EC2 User Guide.

\n

The following are the supported volume types.

\n
    \n
  • \n

    General Purpose SSD: gp2|gp3\n

    \n
  • \n
  • \n

    Provisioned IOPS SSD: io1|io2\n

    \n
  • \n
  • \n

    Throughput Optimized HDD: st1\n

    \n
  • \n
  • \n

    Cold HDD: sc1\n

    \n
  • \n
  • \n

    Magnetic: standard\n

    \n \n

    The magnetic volume type is not supported on Fargate.

    \n
    \n
  • \n
" + "smithy.api#documentation": "

The volume type. This parameter maps 1:1 with the VolumeType parameter of\n\t\t\tthe CreateVolume API in the Amazon EC2 API Reference. For more\n\t\t\tinformation, see Amazon EBS volume types in\n\t\t\tthe Amazon EC2 User Guide.

\n

The following are the supported volume types.

\n
    \n
  • \n

    General Purpose SSD: gp2|gp3\n

    \n
  • \n
  • \n

    Provisioned IOPS SSD: io1|io2\n

    \n
  • \n
  • \n

    Throughput Optimized HDD: st1\n

    \n
  • \n
  • \n

    Cold HDD: sc1\n

    \n
  • \n
  • \n

    Magnetic: standard\n

    \n \n

    The magnetic volume type is not supported on Fargate.

    \n
    \n
  • \n
" } }, "sizeInGiB": { "target": "com.amazonaws.ecs#BoxedInteger", "traits": { - "smithy.api#documentation": "

The size of the volume in GiB. You must specify either a volume size or a snapshot ID. If you specify\n\t\t\ta snapshot ID, the snapshot size is used for the volume size by default. You can optionally specify a\n\t\t\tvolume size greater than or equal to the snapshot size. This parameter maps 1:1 with the\n\t\t\t\tSize parameter of the CreateVolume API in the\n\t\t\t\tAmazon EC2 API Reference.

\n

The following are the supported volume size values for each volume type.

\n
    \n
  • \n

    \n gp2 and gp3: 1-16,384

    \n
  • \n
  • \n

    \n io1 and io2: 4-16,384

    \n
  • \n
  • \n

    \n st1 and sc1: 125-16,384

    \n
  • \n
  • \n

    \n standard: 1-1,024

    \n
  • \n
" + "smithy.api#documentation": "

The size of the volume in GiB. You must specify either a volume size or a snapshot ID.\n\t\t\tIf you specify a snapshot ID, the snapshot size is used for the volume size by default.\n\t\t\tYou can optionally specify a volume size greater than or equal to the snapshot size.\n\t\t\tThis parameter maps 1:1 with the Size parameter of the CreateVolume API in the Amazon EC2 API Reference.

\n

The following are the supported volume size values for each volume type.

\n
    \n
  • \n

    \n gp2 and gp3: 1-16,384

    \n
  • \n
  • \n

    \n io1 and io2: 4-16,384

    \n
  • \n
  • \n

    \n st1 and sc1: 125-16,384

    \n
  • \n
  • \n

    \n standard: 1-1,024

    \n
  • \n
" } }, "snapshotId": { "target": "com.amazonaws.ecs#EBSSnapshotId", "traits": { - "smithy.api#documentation": "

The snapshot that Amazon ECS uses to create the volume. You must specify either a snapshot ID or a volume\n\t\t\tsize. This parameter maps 1:1 with the SnapshotId parameter of the CreateVolume\n\t\t\t\tAPI in the Amazon EC2 API Reference.

" + "smithy.api#documentation": "

The snapshot that Amazon ECS uses to create the volume. You must specify either a snapshot\n\t\t\tID or a volume size. This parameter maps 1:1 with the SnapshotId parameter\n\t\t\tof the CreateVolume API in\n\t\t\tthe Amazon EC2 API Reference.

" } }, "iops": { "target": "com.amazonaws.ecs#BoxedInteger", "traits": { - "smithy.api#documentation": "

The number of I/O operations per second (IOPS). For gp3, io1, and\n\t\t\t\tio2 volumes, this represents the number of IOPS that are provisioned for the volume.\n\t\t\tFor gp2 volumes, this represents the baseline performance of the volume and the rate at\n\t\t\twhich the volume accumulates I/O credits for bursting.

\n

The following are the supported values for each volume type.

\n
    \n
  • \n

    \n gp3: 3,000 - 16,000 IOPS

    \n
  • \n
  • \n

    \n io1: 100 - 64,000 IOPS

    \n
  • \n
  • \n

    \n io2: 100 - 256,000 IOPS

    \n
  • \n
\n

This parameter is required for io1 and io2 volume types. The default for\n\t\t\t\tgp3 volumes is 3,000 IOPS. This parameter is not supported for\n\t\t\t\tst1, sc1, or standard volume types.

\n

This parameter maps 1:1 with the Iops parameter of the CreateVolume API in the\n\t\t\t\tAmazon EC2 API Reference.

" + "smithy.api#documentation": "

The number of I/O operations per second (IOPS). For gp3,\n\t\t\tio1, and io2 volumes, this represents the number of IOPS that\n\t\t\tare provisioned for the volume. For gp2 volumes, this represents the\n\t\t\tbaseline performance of the volume and the rate at which the volume accumulates I/O\n\t\t\tcredits for bursting.

\n

The following are the supported values for each volume type.

\n
    \n
  • \n

    \n gp3: 3,000 - 16,000 IOPS

    \n
  • \n
  • \n

    \n io1: 100 - 64,000 IOPS

    \n
  • \n
  • \n

    \n io2: 100 - 256,000 IOPS

    \n
  • \n
\n

This parameter is required for io1 and io2 volume types. The\n\t\t\tdefault for gp3 volumes is 3,000 IOPS. This parameter is not\n\t\t\tsupported for st1, sc1, or standard volume\n\t\t\ttypes.

\n

This parameter maps 1:1 with the Iops parameter of the CreateVolume API in the Amazon EC2 API Reference.

" } }, "throughput": { "target": "com.amazonaws.ecs#BoxedInteger", "traits": { - "smithy.api#documentation": "

The throughput to provision for a volume, in MiB/s, with a maximum of 1,000 MiB/s. This parameter\n\t\t\tmaps 1:1 with the Throughput parameter of the CreateVolume API in the\n\t\t\t\tAmazon EC2 API Reference.

\n \n

This parameter is only supported for the gp3 volume type.

\n
" + "smithy.api#documentation": "

The throughput to provision for a volume, in MiB/s, with a maximum of 1,000 MiB/s.\n\t\t\tThis parameter maps 1:1 with the Throughput parameter of the CreateVolume API in the Amazon EC2 API Reference.

\n \n

This parameter is only supported for the gp3 volume type.

\n
" } }, "tagSpecifications": { "target": "com.amazonaws.ecs#EBSTagSpecifications", "traits": { - "smithy.api#documentation": "

The tags to apply to the volume. Amazon ECS applies service-managed tags by default. This parameter maps\n\t\t\t1:1 with the TagSpecifications.N parameter of the CreateVolume API in the\n\t\t\t\tAmazon EC2 API Reference.

" + "smithy.api#documentation": "

The tags to apply to the volume. Amazon ECS applies service-managed tags by default. This\n\t\t\tparameter maps 1:1 with the TagSpecifications.N parameter of the CreateVolume API in the Amazon EC2 API Reference.

" } }, "roleArn": { "target": "com.amazonaws.ecs#IAMRoleArn", "traits": { - "smithy.api#documentation": "

The ARN of the IAM role to associate with this volume. This is the Amazon ECS infrastructure IAM role\n\t\t\tthat is used to manage your Amazon Web Services infrastructure. We recommend using the Amazon ECS-managed\n\t\t\t\tAmazonECSInfrastructureRolePolicyForVolumes IAM policy with this role. For more\n\t\t\tinformation, see Amazon ECS infrastructure IAM\n\t\t\t\trole in the Amazon ECS Developer Guide.

", + "smithy.api#documentation": "

The ARN of the IAM role to associate with this volume. This is the Amazon ECS\n\t\t\tinfrastructure IAM role that is used to manage your Amazon Web Services infrastructure. We recommend\n\t\t\tusing the Amazon ECS-managed AmazonECSInfrastructureRolePolicyForVolumes IAM\n\t\t\tpolicy with this role. For more information, see Amazon ECS\n\t\t\t\tinfrastructure IAM role in the Amazon ECS Developer\n\t\t\tGuide.

", "smithy.api#required": {} } }, "terminationPolicy": { "target": "com.amazonaws.ecs#TaskManagedEBSVolumeTerminationPolicy", "traits": { - "smithy.api#documentation": "

The termination policy for the volume when the task exits. This provides a way to control whether\n\t\t\tAmazon ECS terminates the Amazon EBS volume when the task stops.

" + "smithy.api#documentation": "

The termination policy for the volume when the task exits. This provides a way to\n\t\t\tcontrol whether Amazon ECS terminates the Amazon EBS volume when the task stops.

" } }, "filesystemType": { "target": "com.amazonaws.ecs#TaskFilesystemType", "traits": { - "smithy.api#documentation": "

The Linux filesystem type for the volume. For volumes created from a snapshot, you must specify the\n\t\t\tsame filesystem type that the volume was using when the snapshot was created. If there is a filesystem\n\t\t\ttype mismatch, the task will fail to start.

\n

The available filesystem types are\u2028 ext3, ext4, and xfs. If no\n\t\t\tvalue is specified, the xfs filesystem type is used by default.

" + "smithy.api#documentation": "

The Linux filesystem type for the volume. For volumes created from a snapshot, you\n\t\t\tmust specify the same filesystem type that the volume was using when the snapshot was\n\t\t\tcreated. If there is a filesystem type mismatch, the task will fail to start.

\n

The available filesystem types are\u2028 ext3, ext4, and\n\t\t\t\txfs. If no value is specified, the xfs filesystem type is\n\t\t\tused by default.

" } } }, "traits": { - "smithy.api#documentation": "

The configuration for the Amazon EBS volume that Amazon ECS creates and manages on your behalf. These settings\n\t\t\tare used to create each Amazon EBS volume, with one volume created for each task.

" + "smithy.api#documentation": "

The configuration for the Amazon EBS volume that Amazon ECS creates and manages on your behalf.\n\t\t\tThese settings are used to create each Amazon EBS volume, with one volume created for each\n\t\t\ttask.

" } }, "com.amazonaws.ecs#TaskManagedEBSVolumeTerminationPolicy": { @@ -12779,13 +12791,13 @@ "deleteOnTermination": { "target": "com.amazonaws.ecs#BoxedBoolean", "traits": { - "smithy.api#documentation": "

Indicates whether the volume should be deleted on when the task stops. If a value of\n\t\t\t\ttrue is specified, \u2028Amazon ECS deletes the Amazon EBS volume on your behalf when the task goes\n\t\t\tinto the STOPPED state. If no value is specified, the \u2028default value is true\n\t\t\tis used. When set to false, Amazon ECS leaves the volume in your \u2028account.

", + "smithy.api#documentation": "

Indicates whether the volume should be deleted on when the task stops. If a value of\n\t\t\t\ttrue is specified, \u2028Amazon ECS deletes the Amazon EBS volume on your behalf when\n\t\t\tthe task goes into the STOPPED state. If no value is specified, the\n\t\t\t\u2028default value is true is used. When set to false, Amazon ECS\n\t\t\tleaves the volume in your \u2028account.

", "smithy.api#required": {} } } }, "traits": { - "smithy.api#documentation": "

The termination policy for the Amazon EBS volume when the task exits. For more information, see Amazon ECS\n\t\t\t\tvolume termination policy.

" + "smithy.api#documentation": "

The termination policy for the Amazon EBS volume when the task exits. For more information,\n\t\t\tsee Amazon ECS volume termination policy.

" } }, "com.amazonaws.ecs#TaskOverride": { @@ -12812,7 +12824,7 @@ "executionRoleArn": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the task execution role override for the task. For more information, see Amazon ECS task\n\t\t\t\texecution IAM role in the Amazon Elastic Container Service Developer Guide.

" + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the task execution role override for the task. For more information,\n\t\t\tsee Amazon ECS task\n\t\t\t\texecution IAM role in the Amazon Elastic Container Service Developer Guide.

" } }, "memory": { @@ -12824,13 +12836,13 @@ "taskRoleArn": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the role that containers in this task can assume. All containers in this task are\n\t\t\tgranted the permissions that are specified in this role. For more information, see IAM Role for\n\t\t\t\tTasks in the Amazon Elastic Container Service Developer Guide.

" + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the role that containers in this task can assume. All containers in\n\t\t\tthis task are granted the permissions that are specified in this role. For more\n\t\t\tinformation, see IAM Role for Tasks\n\t\t\tin the Amazon Elastic Container Service Developer Guide.

" } }, "ephemeralStorage": { "target": "com.amazonaws.ecs#EphemeralStorage", "traits": { - "smithy.api#documentation": "

The ephemeral storage setting override for the task.

\n \n

This parameter is only supported for tasks hosted on Fargate that use the following\n\t\t\t\tplatform versions:

\n
    \n
  • \n

    Linux platform version 1.4.0 or later.

    \n
  • \n
  • \n

    Windows platform version 1.0.0 or later.

    \n
  • \n
\n
" + "smithy.api#documentation": "

The ephemeral storage setting override for the task.

\n \n

This parameter is only supported for tasks hosted on Fargate that\n\t\t\t\tuse the following platform versions:

\n
    \n
  • \n

    Linux platform version 1.4.0 or later.

    \n
  • \n
  • \n

    Windows platform version 1.0.0 or later.

    \n
  • \n
\n
" } } }, @@ -12862,25 +12874,25 @@ "clusterArn": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the cluster that the service that hosts the task set exists in.

" + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the cluster that the service that hosts the task set exists\n\t\t\tin.

" } }, "startedBy": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

The tag specified when a task set is started. If an CodeDeploy deployment created the task set, the\n\t\t\t\tstartedBy parameter is CODE_DEPLOY. If an external deployment created the\n\t\t\ttask set, the startedBy field isn't used.

" + "smithy.api#documentation": "

The tag specified when a task set is started. If an CodeDeploy deployment created the task\n\t\t\tset, the startedBy parameter is CODE_DEPLOY. If an external\n\t\t\tdeployment created the task set, the startedBy field isn't used.

" } }, "externalId": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

The external ID associated with the task set.

\n

If an CodeDeploy deployment created a task set, the externalId parameter contains the CodeDeploy\n\t\t\tdeployment ID.

\n

If a task set is created for an external deployment and is associated with a service discovery\n\t\t\tregistry, the externalId parameter contains the ECS_TASK_SET_EXTERNAL_ID\n\t\t\tCloud Map attribute.

" + "smithy.api#documentation": "

The external ID associated with the task set.

\n

If an CodeDeploy deployment created a task set, the externalId parameter\n\t\t\tcontains the CodeDeploy deployment ID.

\n

If a task set is created for an external deployment and is associated with a service\n\t\t\tdiscovery registry, the externalId parameter contains the\n\t\t\t\tECS_TASK_SET_EXTERNAL_ID Cloud Map attribute.

" } }, "status": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

The status of the task set. The following describes each state.

\n
\n
PRIMARY
\n
\n

The task set is serving production traffic.

\n
\n
ACTIVE
\n
\n

The task set isn't serving production traffic.

\n
\n
DRAINING
\n
\n

The tasks in the task set are being stopped, and their corresponding targets are being\n\t\t\t\t\t\tderegistered from their target group.

\n
\n
" + "smithy.api#documentation": "

The status of the task set. The following describes each state.

\n
\n
PRIMARY
\n
\n

The task set is serving production traffic.

\n
\n
ACTIVE
\n
\n

The task set isn't serving production traffic.

\n
\n
DRAINING
\n
\n

The tasks in the task set are being stopped, and their corresponding\n\t\t\t\t\t\ttargets are being deregistered from their target group.

\n
\n
" } }, "taskDefinition": { @@ -12893,21 +12905,21 @@ "target": "com.amazonaws.ecs#Integer", "traits": { "smithy.api#default": 0, - "smithy.api#documentation": "

The computed desired count for the task set. This is calculated by multiplying the service's\n\t\t\t\tdesiredCount by the task set's scale percentage. The result is always\n\t\t\trounded up. For example, if the computed desired count is 1.2, it rounds up to 2 tasks.

" + "smithy.api#documentation": "

The computed desired count for the task set. This is calculated by multiplying the\n\t\t\tservice's desiredCount by the task set's scale percentage. The\n\t\t\tresult is always rounded up. For example, if the computed desired count is 1.2, it\n\t\t\trounds up to 2 tasks.

" } }, "pendingCount": { "target": "com.amazonaws.ecs#Integer", "traits": { "smithy.api#default": 0, - "smithy.api#documentation": "

The number of tasks in the task set that are in the PENDING status during a deployment.\n\t\t\tA task in the PENDING state is preparing to enter the RUNNING state. A task\n\t\t\tset enters the PENDING status when it launches for the first time or when it's restarted\n\t\t\tafter being in the STOPPED state.

" + "smithy.api#documentation": "

The number of tasks in the task set that are in the PENDING status during\n\t\t\ta deployment. A task in the PENDING state is preparing to enter the\n\t\t\t\tRUNNING state. A task set enters the PENDING status when\n\t\t\tit launches for the first time or when it's restarted after being in the\n\t\t\t\tSTOPPED state.

" } }, "runningCount": { "target": "com.amazonaws.ecs#Integer", "traits": { "smithy.api#default": 0, - "smithy.api#documentation": "

The number of tasks in the task set that are in the RUNNING status during a deployment.\n\t\t\tA task in the RUNNING state is running and ready for use.

" + "smithy.api#documentation": "

The number of tasks in the task set that are in the RUNNING status during\n\t\t\ta deployment. A task in the RUNNING state is running and ready for\n\t\t\tuse.

" } }, "createdAt": { @@ -12925,7 +12937,7 @@ "launchType": { "target": "com.amazonaws.ecs#LaunchType", "traits": { - "smithy.api#documentation": "

The launch type the tasks in the task set are using. For more information, see Amazon ECS launch\n\t\t\t\ttypes in the Amazon Elastic Container Service Developer Guide.

" + "smithy.api#documentation": "

The launch type the tasks in the task set are using. For more information, see Amazon ECS\n\t\t\t\tlaunch types in the Amazon Elastic Container Service Developer Guide.

" } }, "capacityProviderStrategy": { @@ -12937,13 +12949,13 @@ "platformVersion": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

The Fargate platform version where the tasks in the task set are running. A platform version is\n\t\t\tonly specified for tasks run on Fargate. For more information, see Fargate platform versions in\n\t\t\tthe Amazon Elastic Container Service Developer Guide.

" + "smithy.api#documentation": "

The Fargate platform version where the tasks in the task set are running. A platform\n\t\t\tversion is only specified for tasks run on Fargate. For more information, see Fargate platform versions in the Amazon Elastic Container Service Developer Guide.

" } }, "platformFamily": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

The operating system that your tasks in the set are running on. A platform family is specified only\n\t\t\tfor tasks that use the Fargate launch type.

\n

All tasks in the set must have the same value.

" + "smithy.api#documentation": "

The operating system that your tasks in the set are running on. A platform family is\n\t\t\tspecified only for tasks that use the Fargate launch type.

\n

All tasks in the set must have the same value.

" } }, "networkConfiguration": { @@ -12961,31 +12973,31 @@ "serviceRegistries": { "target": "com.amazonaws.ecs#ServiceRegistries", "traits": { - "smithy.api#documentation": "

The details for the service discovery registries to assign to this task set. For more information,\n\t\t\tsee Service\n\t\t\t\tdiscovery.

" + "smithy.api#documentation": "

The details for the service discovery registries to assign to this task set. For more\n\t\t\tinformation, see Service\n\t\t\t\tdiscovery.

" } }, "scale": { "target": "com.amazonaws.ecs#Scale", "traits": { - "smithy.api#documentation": "

A floating-point percentage of your desired number of tasks to place and keep running in the task\n\t\t\tset.

" + "smithy.api#documentation": "

A floating-point percentage of your desired number of tasks to place and keep running\n\t\t\tin the task set.

" } }, "stabilityStatus": { "target": "com.amazonaws.ecs#StabilityStatus", "traits": { - "smithy.api#documentation": "

The stability status. This indicates whether the task set has reached a steady state. If the\n\t\t\tfollowing conditions are met, the task set are in STEADY_STATE:

\n
    \n
  • \n

    The task runningCount is equal to the computedDesiredCount.

    \n
  • \n
  • \n

    The pendingCount is 0.

    \n
  • \n
  • \n

    There are no tasks that are running on container instances in the DRAINING\n\t\t\t\t\tstatus.

    \n
  • \n
  • \n

    All tasks are reporting a healthy status from the load balancers, service discovery, and\n\t\t\t\t\tcontainer health checks.

    \n
  • \n
\n

If any of those conditions aren't met, the stability status returns STABILIZING.

" + "smithy.api#documentation": "

The stability status. This indicates whether the task set has reached a steady state.\n\t\t\tIf the following conditions are met, the task set are in\n\t\t\tSTEADY_STATE:

\n
    \n
  • \n

    The task runningCount is equal to the\n\t\t\t\t\t\tcomputedDesiredCount.

    \n
  • \n
  • \n

    The pendingCount is 0.

    \n
  • \n
  • \n

    There are no tasks that are running on container instances in the\n\t\t\t\t\t\tDRAINING status.

    \n
  • \n
  • \n

    All tasks are reporting a healthy status from the load balancers, service\n\t\t\t\t\tdiscovery, and container health checks.

    \n
  • \n
\n

If any of those conditions aren't met, the stability status returns\n\t\t\t\tSTABILIZING.

" } }, "stabilityStatusAt": { "target": "com.amazonaws.ecs#Timestamp", "traits": { - "smithy.api#documentation": "

The Unix timestamp for the time when the task set stability status was retrieved.

" + "smithy.api#documentation": "

The Unix timestamp for the time when the task set stability status was\n\t\t\tretrieved.

" } }, "tags": { "target": "com.amazonaws.ecs#Tags", "traits": { - "smithy.api#documentation": "

The metadata that you apply to the task set to help you categorize and organize them. Each tag\n\t\t\tconsists of a key and an optional value. You define both.

\n

The following basic restrictions apply to tags:

\n
    \n
  • \n

    Maximum number of tags per resource - 50

    \n
  • \n
  • \n

    For each resource, each tag key must be unique, and each tag key can have only\n one value.

    \n
  • \n
  • \n

    Maximum key length - 128 Unicode characters in UTF-8

    \n
  • \n
  • \n

    Maximum value length - 256 Unicode characters in UTF-8

    \n
  • \n
  • \n

    If your tagging schema is used across multiple services and resources,\n remember that other services may have restrictions on allowed characters.\n Generally allowed characters are: letters, numbers, and spaces representable in\n UTF-8, and the following characters: + - = . _ : / @.

    \n
  • \n
  • \n

    Tag keys and values are case-sensitive.

    \n
  • \n
  • \n

    Do not use aws:, AWS:, or any upper or lowercase\n combination of such as a prefix for either keys or values as it is reserved for\n Amazon Web Services use. You cannot edit or delete tag keys or values with this prefix. Tags with\n this prefix do not count against your tags per resource limit.

    \n
  • \n
" + "smithy.api#documentation": "

The metadata that you apply to the task set to help you categorize and organize them.\n\t\t\tEach tag consists of a key and an optional value. You define both.

\n

The following basic restrictions apply to tags:

\n
    \n
  • \n

    Maximum number of tags per resource - 50

    \n
  • \n
  • \n

    For each resource, each tag key must be unique, and each tag key can have only\n one value.

    \n
  • \n
  • \n

    Maximum key length - 128 Unicode characters in UTF-8

    \n
  • \n
  • \n

    Maximum value length - 256 Unicode characters in UTF-8

    \n
  • \n
  • \n

    If your tagging schema is used across multiple services and resources,\n remember that other services may have restrictions on allowed characters.\n Generally allowed characters are: letters, numbers, and spaces representable in\n UTF-8, and the following characters: + - = . _ : / @.

    \n
  • \n
  • \n

    Tag keys and values are case-sensitive.

    \n
  • \n
  • \n

    Do not use aws:, AWS:, or any upper or lowercase\n combination of such as a prefix for either keys or values as it is reserved for\n Amazon Web Services use. You cannot edit or delete tag keys or values with this prefix. Tags with\n this prefix do not count against your tags per resource limit.

    \n
  • \n
" } }, "fargateEphemeralStorage": { @@ -12996,7 +13008,7 @@ } }, "traits": { - "smithy.api#documentation": "

Information about a set of Amazon ECS tasks in either an CodeDeploy or an EXTERNAL deployment. An\n\t\t\tAmazon ECS task set includes details such as the desired number of tasks, how many tasks are running, and\n\t\t\twhether the task set serves production traffic.

" + "smithy.api#documentation": "

Information about a set of Amazon ECS tasks in either an CodeDeploy or an EXTERNAL\n\t\t\tdeployment. An Amazon ECS task set includes details such as the desired number of tasks, how\n\t\t\tmany tasks are running, and whether the task set serves production traffic.

" } }, "com.amazonaws.ecs#TaskSetField": { @@ -13027,7 +13039,7 @@ } }, "traits": { - "smithy.api#documentation": "

The specified task set wasn't found. You can view your available task sets with DescribeTaskSets. Task sets are specific to each cluster, service and Region.

", + "smithy.api#documentation": "

The specified task set wasn't found. You can view your available task sets with DescribeTaskSets. Task sets are specific to each cluster, service and\n\t\t\tRegion.

", "smithy.api#error": "client" } }, @@ -13084,19 +13096,19 @@ "name": { "target": "com.amazonaws.ecs#ECSVolumeName", "traits": { - "smithy.api#documentation": "

The name of the volume. This value must match the volume name from the Volume object in\n\t\t\tthe task definition.

", + "smithy.api#documentation": "

The name of the volume. This value must match the volume name from the\n\t\t\t\tVolume object in the task definition.

", "smithy.api#required": {} } }, "managedEBSVolume": { "target": "com.amazonaws.ecs#TaskManagedEBSVolumeConfiguration", "traits": { - "smithy.api#documentation": "

The configuration for the Amazon EBS volume that Amazon ECS creates and manages on your behalf. These settings\n\t\t\tare used to create each Amazon EBS volume, with one volume created for each task. The Amazon EBS volumes are\n\t\t\tvisible in your account in the Amazon EC2 console once they are created.

" + "smithy.api#documentation": "

The configuration for the Amazon EBS volume that Amazon ECS creates and manages on your behalf.\n\t\t\tThese settings are used to create each Amazon EBS volume, with one volume created for each\n\t\t\ttask. The Amazon EBS volumes are visible in your account in the Amazon EC2 console once they are\n\t\t\tcreated.

" } } }, "traits": { - "smithy.api#documentation": "

Configuration settings for the task volume that was configuredAtLaunch that weren't set\n\t\t\tduring RegisterTaskDef.

" + "smithy.api#documentation": "

Configuration settings for the task volume that was configuredAtLaunch\n\t\t\tthat weren't set during RegisterTaskDef.

" } }, "com.amazonaws.ecs#TaskVolumeConfigurations": { @@ -13117,18 +13129,18 @@ "idleTimeoutSeconds": { "target": "com.amazonaws.ecs#Duration", "traits": { - "smithy.api#documentation": "

The amount of time in seconds a connection will stay active while idle. A value of 0 can\n\t\t\tbe set to disable idleTimeout.

\n

The idleTimeout default for HTTP/HTTP2/GRPC is 5\n\t\t\tminutes.

\n

The idleTimeout default for TCP is 1 hour.

" + "smithy.api#documentation": "

The amount of time in seconds a connection will stay active while idle. A value of\n\t\t\t\t0 can be set to disable idleTimeout.

\n

The idleTimeout default for\n\t\t\t\tHTTP/HTTP2/GRPC is 5 minutes.

\n

The idleTimeout default for TCP is 1 hour.

" } }, "perRequestTimeoutSeconds": { "target": "com.amazonaws.ecs#Duration", "traits": { - "smithy.api#documentation": "

The amount of time waiting for the upstream to respond with a complete response per request. A value\n\t\t\tof 0 can be set to disable perRequestTimeout. perRequestTimeout\n\t\t\tcan only be set if Service Connect appProtocol isn't TCP. Only\n\t\t\t\tidleTimeout is allowed for TCP\n appProtocol.

" + "smithy.api#documentation": "

The amount of time waiting for the upstream to respond with a complete response per\n\t\t\trequest. A value of 0 can be set to disable perRequestTimeout.\n\t\t\t\tperRequestTimeout can only be set if Service Connect\n\t\t\t\tappProtocol isn't TCP. Only idleTimeout is\n\t\t\tallowed for TCP\n appProtocol.

" } } }, "traits": { - "smithy.api#documentation": "

An object that represents the timeout configurations for Service Connect.

\n \n

If idleTimeout is set to a time that is less than perRequestTimeout,\n\t\t\t\tthe connection will close when the idleTimeout is reached and not the\n\t\t\t\t\tperRequestTimeout.

\n
" + "smithy.api#documentation": "

An object that represents the timeout configurations for Service Connect.

\n \n

If idleTimeout is set to a time that is less than\n\t\t\t\t\tperRequestTimeout, the connection will close when the\n\t\t\t\t\tidleTimeout is reached and not the\n\t\t\t\tperRequestTimeout.

\n
" } }, "com.amazonaws.ecs#Timestamp": { @@ -13155,7 +13167,7 @@ "mountOptions": { "target": "com.amazonaws.ecs#StringList", "traits": { - "smithy.api#documentation": "

The list of tmpfs volume mount options.

\n

Valid values: \"defaults\" | \"ro\" | \"rw\" | \"suid\" | \"nosuid\" | \"dev\" | \"nodev\" | \"exec\" |\n\t\t\t\t\"noexec\" | \"sync\" | \"async\" | \"dirsync\" | \"remount\" | \"mand\" | \"nomand\" | \"atime\" | \"noatime\" |\n\t\t\t\t\"diratime\" | \"nodiratime\" | \"bind\" | \"rbind\" | \"unbindable\" | \"runbindable\" | \"private\" |\n\t\t\t\t\"rprivate\" | \"shared\" | \"rshared\" | \"slave\" | \"rslave\" | \"relatime\" | \"norelatime\" | \"strictatime\"\n\t\t\t\t| \"nostrictatime\" | \"mode\" | \"uid\" | \"gid\" | \"nr_inodes\" | \"nr_blocks\" | \"mpol\"\n

" + "smithy.api#documentation": "

The list of tmpfs volume mount options.

\n

Valid values: \"defaults\" | \"ro\" | \"rw\" | \"suid\" | \"nosuid\" | \"dev\" | \"nodev\" |\n\t\t\t\t\"exec\" | \"noexec\" | \"sync\" | \"async\" | \"dirsync\" | \"remount\" | \"mand\" | \"nomand\" |\n\t\t\t\t\"atime\" | \"noatime\" | \"diratime\" | \"nodiratime\" | \"bind\" | \"rbind\" | \"unbindable\" |\n\t\t\t\t\"runbindable\" | \"private\" | \"rprivate\" | \"shared\" | \"rshared\" | \"slave\" | \"rslave\" |\n\t\t\t\t\"relatime\" | \"norelatime\" | \"strictatime\" | \"nostrictatime\" | \"mode\" | \"uid\" | \"gid\"\n\t\t\t\t| \"nr_inodes\" | \"nr_blocks\" | \"mpol\"\n

" } } }, @@ -13200,7 +13212,7 @@ "target": "com.amazonaws.ecs#Integer", "traits": { "smithy.api#default": 0, - "smithy.api#documentation": "

The soft limit for the ulimit type. The value can be specified in bytes, seconds, or as\n\t\t\ta count, depending on the type of the ulimit.

", + "smithy.api#documentation": "

The soft limit for the ulimit type. The value can be specified in bytes,\n\t\t\tseconds, or as a count, depending on the type of the\n\t\t\tulimit.

", "smithy.api#required": {} } }, @@ -13208,13 +13220,13 @@ "target": "com.amazonaws.ecs#Integer", "traits": { "smithy.api#default": 0, - "smithy.api#documentation": "

The hard limit for the ulimit type. The value can be specified in bytes, seconds, or as\n\t\t\ta count, depending on the type of the ulimit.

", + "smithy.api#documentation": "

The hard limit for the ulimit type. The value can be specified in bytes,\n\t\t\tseconds, or as a count, depending on the type of the\n\t\t\tulimit.

", "smithy.api#required": {} } } }, "traits": { - "smithy.api#documentation": "

The ulimit settings to pass to the container.

\n

Amazon ECS tasks hosted on Fargate use the default\n\t\t\t\t\t\t\tresource limit values set by the operating system with the exception of\n\t\t\t\t\t\t\tthe nofile resource limit parameter which Fargate\n\t\t\t\t\t\t\toverrides. The nofile resource limit sets a restriction on\n\t\t\t\t\t\t\tthe number of open files that a container can use. The default\n\t\t\t\t\t\t\t\tnofile soft limit is 65535 and the default hard limit\n\t\t\t\t\t\t\tis 65535.

\n

You can specify the ulimit settings for a container in a task definition.

" + "smithy.api#documentation": "

The ulimit settings to pass to the container.

\n

Amazon ECS tasks hosted on Fargate use the default\n\t\t\t\t\t\t\tresource limit values set by the operating system with the exception of\n\t\t\t\t\t\t\tthe nofile resource limit parameter which Fargate\n\t\t\t\t\t\t\toverrides. The nofile resource limit sets a restriction on\n\t\t\t\t\t\t\tthe number of open files that a container can use. The default\n\t\t\t\t\t\t\t\tnofile soft limit is 65535 and the default hard limit\n\t\t\t\t\t\t\tis 65535.

\n

You can specify the ulimit settings for a container in a task\n\t\t\tdefinition.

" } }, "com.amazonaws.ecs#UlimitList": { @@ -13381,7 +13393,7 @@ "resourceArn": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the resource to delete tags from. Currently, the supported resources are Amazon ECS\n\t\t\tcapacity providers, tasks, services, task definitions, clusters, and container instances.

", + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the resource to delete tags from. Currently, the supported resources\n\t\t\tare Amazon ECS capacity providers, tasks, services, task definitions, clusters, and container\n\t\t\tinstances.

", "smithy.api#required": {} } }, @@ -13578,7 +13590,7 @@ "settings": { "target": "com.amazonaws.ecs#ClusterSettings", "traits": { - "smithy.api#documentation": "

The setting to use by default for a cluster. This parameter is used to turn on CloudWatch Container\n\t\t\tInsights for a cluster. If this value is specified, it overrides the containerInsights\n\t\t\tvalue set with PutAccountSetting or PutAccountSettingDefault.

\n \n

Currently, if you delete an existing cluster that does not have Container Insights turned on, and\n\t\t\t\tthen create a new cluster with the same name with Container Insights tuned on, Container Insights\n\t\t\t\twill not actually be turned on. If you want to preserve the same name for your existing cluster and\n\t\t\t\tturn on Container Insights, you must wait 7 days before you can re-create it.

\n
", + "smithy.api#documentation": "

The setting to use by default for a cluster. This parameter is used to turn on CloudWatch\n\t\t\tContainer Insights for a cluster. If this value is specified, it overrides the\n\t\t\t\tcontainerInsights value set with PutAccountSetting or PutAccountSettingDefault.

\n \n

Currently, if you delete an existing cluster that does not have Container Insights\n\t\t\t\tturned on, and then create a new cluster with the same name with Container Insights\n\t\t\t\tturned on, Container Insights will not actually be turned on. If you want to preserve\n\t\t\t\tthe same name for your existing cluster and turn on Container Insights, you must\n\t\t\t\twait 7 days before you can re-create it.

\n
", "smithy.api#required": {} } } @@ -13633,7 +13645,7 @@ } ], "traits": { - "smithy.api#documentation": "

Updates the Amazon ECS container agent on a specified container instance. Updating the Amazon ECS container\n\t\t\tagent doesn't interrupt running tasks or services on the container instance. The process for updating\n\t\t\tthe agent differs depending on whether your container instance was launched with the Amazon ECS-optimized\n\t\t\tAMI or another operating system.

\n \n

The UpdateContainerAgent API isn't supported for container instances using the\n\t\t\t\tAmazon ECS-optimized Amazon Linux 2 (arm64) AMI. To update the container agent, you can update the\n\t\t\t\t\tecs-init package. This updates the agent. For more information, see Updating\n\t\t\t\t\tthe Amazon ECS container agent in the Amazon Elastic Container Service Developer Guide.

\n
\n \n

Agent updates with the UpdateContainerAgent API operation do not apply to Windows\n\t\t\t\tcontainer instances. We recommend that you launch new container instances to update the agent\n\t\t\t\tversion in your Windows clusters.

\n
\n

The UpdateContainerAgent API requires an Amazon ECS-optimized AMI or Amazon Linux AMI with\n\t\t\tthe ecs-init service installed and running. For help updating the Amazon ECS container agent on\n\t\t\tother operating systems, see Manually updating\n\t\t\t\tthe Amazon ECS container agent in the Amazon Elastic Container Service Developer Guide.

" + "smithy.api#documentation": "

Updates the Amazon ECS container agent on a specified container instance. Updating the\n\t\t\tAmazon ECS container agent doesn't interrupt running tasks or services on the container\n\t\t\tinstance. The process for updating the agent differs depending on whether your container\n\t\t\tinstance was launched with the Amazon ECS-optimized AMI or another operating system.

\n \n

The UpdateContainerAgent API isn't supported for container instances\n\t\t\t\tusing the Amazon ECS-optimized Amazon Linux 2 (arm64) AMI. To update the container agent,\n\t\t\t\tyou can update the ecs-init package. This updates the agent. For more\n\t\t\t\tinformation, see Updating the\n\t\t\t\t\tAmazon ECS container agent in the Amazon Elastic Container Service Developer Guide.

\n
\n \n

Agent updates with the UpdateContainerAgent API operation do not\n\t\t\t\tapply to Windows container instances. We recommend that you launch new container\n\t\t\t\tinstances to update the agent version in your Windows clusters.

\n
\n

The UpdateContainerAgent API requires an Amazon ECS-optimized AMI or Amazon\n\t\t\tLinux AMI with the ecs-init service installed and running. For help\n\t\t\tupdating the Amazon ECS container agent on other operating systems, see Manually updating the Amazon ECS container agent in the Amazon Elastic Container Service Developer Guide.

" } }, "com.amazonaws.ecs#UpdateContainerAgentRequest": { @@ -13642,13 +13654,13 @@ "cluster": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

The short name or full Amazon Resource Name (ARN) of the cluster that your container instance is running on.\n\t\t\tIf you do not specify a cluster, the default cluster is assumed.

" + "smithy.api#documentation": "

The short name or full Amazon Resource Name (ARN) of the cluster that your container instance is\n\t\t\trunning on. If you do not specify a cluster, the default cluster is assumed.

" } }, "containerInstance": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

The container instance ID or full ARN entries for the container instance where you would like to\n\t\t\tupdate the Amazon ECS container agent.

", + "smithy.api#documentation": "

The container instance ID or full ARN entries for the container instance where you\n\t\t\twould like to update the Amazon ECS container agent.

", "smithy.api#required": {} } } @@ -13694,7 +13706,7 @@ } ], "traits": { - "smithy.api#documentation": "

Modifies the status of an Amazon ECS container instance.

\n

Once a container instance has reached an ACTIVE state, you can change the status of a\n\t\t\tcontainer instance to DRAINING to manually remove an instance from a cluster, for example\n\t\t\tto perform system updates, update the Docker daemon, or scale down the cluster size.

\n \n

A container instance can't be changed to DRAINING until it has reached an\n\t\t\t\t\tACTIVE status. If the instance is in any other status, an error will be\n\t\t\t\treceived.

\n
\n

When you set a container instance to DRAINING, Amazon ECS prevents new tasks from being\n\t\t\tscheduled for placement on the container instance and replacement service tasks are started on other\n\t\t\tcontainer instances in the cluster if the resources are available. Service tasks on the container\n\t\t\tinstance that are in the PENDING state are stopped immediately.

\n

Service tasks on the container instance that are in the RUNNING state are stopped and\n\t\t\treplaced according to the service's deployment configuration parameters,\n\t\t\t\tminimumHealthyPercent and maximumPercent. You can change the deployment\n\t\t\tconfiguration of your service using UpdateService.

\n
    \n
  • \n

    If minimumHealthyPercent is below 100%, the scheduler can ignore\n\t\t\t\t\t\tdesiredCount temporarily during task replacement. For example,\n\t\t\t\t\t\tdesiredCount is four tasks, a minimum of 50% allows the scheduler to stop two\n\t\t\t\t\texisting tasks before starting two new tasks. If the minimum is 100%, the service scheduler\n\t\t\t\t\tcan't remove existing tasks until the replacement tasks are considered healthy. Tasks for\n\t\t\t\t\tservices that do not use a load balancer are considered healthy if they're in the\n\t\t\t\t\t\tRUNNING state. Tasks for services that use a load balancer are considered\n\t\t\t\t\thealthy if they're in the RUNNING state and are reported as healthy by the load\n\t\t\t\t\tbalancer.

    \n
  • \n
  • \n

    The maximumPercent parameter represents an upper limit on the number of running\n\t\t\t\t\ttasks during task replacement. You can use this to define the replacement batch size. For\n\t\t\t\t\texample, if desiredCount is four tasks, a maximum of 200% starts four new tasks\n\t\t\t\t\tbefore stopping the four tasks to be drained, provided that the cluster resources required to\n\t\t\t\t\tdo this are available. If the maximum is 100%, then replacement tasks can't start until the\n\t\t\t\t\tdraining tasks have stopped.

    \n
  • \n
\n

Any PENDING or RUNNING tasks that do not belong to a service aren't\n\t\t\taffected. You must wait for them to finish or stop them manually.

\n

A container instance has completed draining when it has no more RUNNING tasks. You can\n\t\t\tverify this using ListTasks.

\n

When a container instance has been drained, you can set a container instance to ACTIVE\n\t\t\tstatus and once it has reached that status the Amazon ECS scheduler can begin scheduling tasks on the\n\t\t\tinstance again.

" + "smithy.api#documentation": "

Modifies the status of an Amazon ECS container instance.

\n

Once a container instance has reached an ACTIVE state, you can change the\n\t\t\tstatus of a container instance to DRAINING to manually remove an instance\n\t\t\tfrom a cluster, for example to perform system updates, update the Docker daemon, or\n\t\t\tscale down the cluster size.

\n \n

A container instance can't be changed to DRAINING until it has\n\t\t\t\treached an ACTIVE status. If the instance is in any other status, an\n\t\t\t\terror will be received.

\n
\n

When you set a container instance to DRAINING, Amazon ECS prevents new tasks\n\t\t\tfrom being scheduled for placement on the container instance and replacement service\n\t\t\ttasks are started on other container instances in the cluster if the resources are\n\t\t\tavailable. Service tasks on the container instance that are in the PENDING\n\t\t\tstate are stopped immediately.

\n

Service tasks on the container instance that are in the RUNNING state are\n\t\t\tstopped and replaced according to the service's deployment configuration parameters,\n\t\t\t\tminimumHealthyPercent and maximumPercent. You can change\n\t\t\tthe deployment configuration of your service using UpdateService.

\n
    \n
  • \n

    If minimumHealthyPercent is below 100%, the scheduler can ignore\n\t\t\t\t\t\tdesiredCount temporarily during task replacement. For example,\n\t\t\t\t\t\tdesiredCount is four tasks, a minimum of 50% allows the\n\t\t\t\t\tscheduler to stop two existing tasks before starting two new tasks. If the\n\t\t\t\t\tminimum is 100%, the service scheduler can't remove existing tasks until the\n\t\t\t\t\treplacement tasks are considered healthy. Tasks for services that do not use a\n\t\t\t\t\tload balancer are considered healthy if they're in the RUNNING\n\t\t\t\t\tstate. Tasks for services that use a load balancer are considered healthy if\n\t\t\t\t\tthey're in the RUNNING state and are reported as healthy by the\n\t\t\t\t\tload balancer.

    \n
  • \n
  • \n

    The maximumPercent parameter represents an upper limit on the\n\t\t\t\t\tnumber of running tasks during task replacement. You can use this to define the\n\t\t\t\t\treplacement batch size. For example, if desiredCount is four tasks,\n\t\t\t\t\ta maximum of 200% starts four new tasks before stopping the four tasks to be\n\t\t\t\t\tdrained, provided that the cluster resources required to do this are available.\n\t\t\t\t\tIf the maximum is 100%, then replacement tasks can't start until the draining\n\t\t\t\t\ttasks have stopped.

    \n
  • \n
\n

Any PENDING or RUNNING tasks that do not belong to a service\n\t\t\taren't affected. You must wait for them to finish or stop them manually.

\n

A container instance has completed draining when it has no more RUNNING\n\t\t\ttasks. You can verify this using ListTasks.

\n

When a container instance has been drained, you can set a container instance to\n\t\t\t\tACTIVE status and once it has reached that status the Amazon ECS scheduler\n\t\t\tcan begin scheduling tasks on the instance again.

" } }, "com.amazonaws.ecs#UpdateContainerInstancesStateRequest": { @@ -13703,7 +13715,7 @@ "cluster": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

The short name or full Amazon Resource Name (ARN) of the cluster that hosts the container instance to update.\n\t\t\tIf you do not specify a cluster, the default cluster is assumed.

" + "smithy.api#documentation": "

The short name or full Amazon Resource Name (ARN) of the cluster that hosts the container instance to\n\t\t\tupdate. If you do not specify a cluster, the default cluster is assumed.

" } }, "containerInstances": { @@ -13716,7 +13728,7 @@ "status": { "target": "com.amazonaws.ecs#ContainerInstanceStatus", "traits": { - "smithy.api#documentation": "

The container instance state to update the container instance with. The only valid values for this\n\t\t\taction are ACTIVE and DRAINING. A container instance can only be updated to\n\t\t\t\tDRAINING status once it has reached an ACTIVE state. If a container\n\t\t\tinstance is in REGISTERING, DEREGISTERING, or\n\t\t\t\tREGISTRATION_FAILED state you can describe the container instance but can't update the\n\t\t\tcontainer instance state.

", + "smithy.api#documentation": "

The container instance state to update the container instance with. The only valid\n\t\t\tvalues for this action are ACTIVE and DRAINING. A container\n\t\t\tinstance can only be updated to DRAINING status once it has reached an\n\t\t\t\tACTIVE state. If a container instance is in REGISTERING,\n\t\t\t\tDEREGISTERING, or REGISTRATION_FAILED state you can\n\t\t\tdescribe the container instance but can't update the container instance state.

", "smithy.api#required": {} } } @@ -13756,7 +13768,7 @@ } }, "traits": { - "smithy.api#documentation": "

There's already a current Amazon ECS container agent update in progress on the container instance that's\n\t\t\tspecified. If the container agent becomes disconnected while it's in a transitional stage, such as\n\t\t\t\tPENDING or STAGING, the update process can get stuck in that state.\n\t\t\tHowever, when the agent reconnects, it resumes where it stopped previously.

", + "smithy.api#documentation": "

There's already a current Amazon ECS container agent update in progress on the container\n\t\t\tinstance that's specified. If the container agent becomes disconnected while it's in a\n\t\t\ttransitional stage, such as PENDING or STAGING, the update\n\t\t\tprocess can get stuck in that state. However, when the agent reconnects, it resumes\n\t\t\twhere it stopped previously.

", "smithy.api#error": "client" } }, @@ -13804,7 +13816,7 @@ } ], "traits": { - "smithy.api#documentation": "

Modifies the parameters of a service.

\n \n

On March 21, 2024, a change was made to resolve the task definition revision before authorization. When a task definition revision is not specified, authorization will occur using the latest revision of a task definition.

\n
\n

For services using the rolling update (ECS) you can update the desired count, deployment\n\t\t\tconfiguration, network configuration, load balancers, service registries, enable ECS managed tags\n\t\t\toption, propagate tags option, task placement constraints and strategies, and task definition. When you\n\t\t\tupdate any of these parameters, Amazon ECS starts new tasks with the new configuration.

\n

You can attach Amazon EBS volumes to Amazon ECS tasks by configuring the volume when starting or running a\n\t\t\ttask, or when creating or updating a service. For more infomation, see Amazon EBS\n\t\t\t\tvolumes in the Amazon Elastic Container Service Developer Guide. You can update your volume configurations and trigger a new\n\t\t\tdeployment. volumeConfigurations is only supported for REPLICA service and not DAEMON\n\t\t\tservice. If you leave volumeConfigurations\n null, it doesn't trigger a new deployment. For more infomation on volumes, see Amazon EBS\n\t\t\t\tvolumes in the Amazon Elastic Container Service Developer Guide.

\n

For services using the blue/green (CODE_DEPLOY) deployment controller, only the desired\n\t\t\tcount, deployment configuration, health check grace period, task placement constraints and strategies,\n\t\t\tenable ECS managed tags option, and propagate tags can be updated using this API. If the network\n\t\t\tconfiguration, platform version, task definition, or load balancer need to be updated, create a new\n\t\t\tCodeDeploy deployment. For more information, see CreateDeployment in the\n\t\t\tCodeDeploy API Reference.

\n

For services using an external deployment controller, you can update only the desired count, task\n\t\t\tplacement constraints and strategies, health check grace period, enable ECS managed tags option, and\n\t\t\tpropagate tags option, using this API. If the launch type, load balancer, network configuration,\n\t\t\tplatform version, or task definition need to be updated, create a new task set For more information,\n\t\t\tsee CreateTaskSet.

\n

You can add to or subtract from the number of instantiations of a task definition in a service by\n\t\t\tspecifying the cluster that the service is running in and a new desiredCount\n\t\t\tparameter.

\n

You can attach Amazon EBS volumes to Amazon ECS tasks by configuring the volume when starting or running a\n\t\t\ttask, or when creating or updating a service. For more infomation, see Amazon EBS\n\t\t\t\tvolumes in the Amazon Elastic Container Service Developer Guide.

\n

If you have updated the container image of your application, you can create a new task definition\n\t\t\twith that image and deploy it to your service. The service scheduler uses the minimum healthy percent\n\t\t\tand maximum percent parameters (in the service's deployment configuration) to determine the deployment\n\t\t\tstrategy.

\n \n

If your updated Docker image uses the same tag as what is in the existing task definition for\n\t\t\t\tyour service (for example, my_image:latest), you don't need to create a new revision\n\t\t\t\tof your task definition. You can update the service using the forceNewDeployment\n\t\t\t\toption. The new tasks launched by the deployment pull the current image/tag combination from your\n\t\t\t\trepository when they start.

\n
\n

You can also update the deployment configuration of a service. When a deployment is triggered by\n\t\t\tupdating the task definition of a service, the service scheduler uses the deployment configuration\n\t\t\tparameters, minimumHealthyPercent and maximumPercent, to determine the\n\t\t\tdeployment strategy.

\n
    \n
  • \n

    If minimumHealthyPercent is below 100%, the scheduler can ignore\n\t\t\t\t\t\tdesiredCount temporarily during a deployment. For example, if\n\t\t\t\t\t\tdesiredCount is four tasks, a minimum of 50% allows the scheduler to stop two\n\t\t\t\t\texisting tasks before starting two new tasks. Tasks for services that don't use a load balancer\n\t\t\t\t\tare considered healthy if they're in the RUNNING state. Tasks for services that\n\t\t\t\t\tuse a load balancer are considered healthy if they're in the RUNNING state and are\n\t\t\t\t\treported as healthy by the load balancer.

    \n
  • \n
  • \n

    The maximumPercent parameter represents an upper limit on the number of running\n\t\t\t\t\ttasks during a deployment. You can use it to define the deployment batch size. For example, if\n\t\t\t\t\t\tdesiredCount is four tasks, a maximum of 200% starts four new tasks before\n\t\t\t\t\tstopping the four older tasks (provided that the cluster resources required to do this are\n\t\t\t\t\tavailable).

    \n
  • \n
\n

When UpdateService stops a task during a deployment, the equivalent of docker stop\n\t\t\tis issued to the containers running in the task. This results in a SIGTERM and a 30-second\n\t\t\ttimeout. After this, SIGKILL is sent and the containers are forcibly stopped. If the\n\t\t\tcontainer handles the SIGTERM gracefully and exits within 30 seconds from receiving it, no\n\t\t\t\tSIGKILL is sent.

\n

When the service scheduler launches new tasks, it determines task placement in your cluster with the\n\t\t\tfollowing logic.

\n
    \n
  • \n

    Determine which of the container instances in your cluster can support your service's task\n\t\t\t\t\tdefinition. For example, they have the required CPU, memory, ports, and container instance\n\t\t\t\t\tattributes.

    \n
  • \n
  • \n

    By default, the service scheduler attempts to balance tasks across Availability Zones in this\n\t\t\t\t\tmanner even though you can choose a different placement strategy.

    \n
      \n
    • \n

      Sort the valid container instances by the fewest number of running tasks for this\n\t\t\t\t\t\t\tservice in the same Availability Zone as the instance. For example, if zone A has one\n\t\t\t\t\t\t\trunning service task and zones B and C each have zero, valid container instances in\n\t\t\t\t\t\t\teither zone B or C are considered optimal for placement.

      \n
    • \n
    • \n

      Place the new service task on a valid container instance in an optimal Availability\n\t\t\t\t\t\t\tZone (based on the previous steps), favoring container instances with the fewest number\n\t\t\t\t\t\t\tof running tasks for this service.

      \n
    • \n
    \n
  • \n
\n

When the service scheduler stops running tasks, it attempts to maintain balance across the\n\t\t\tAvailability Zones in your cluster using the following logic:

\n
    \n
  • \n

    Sort the container instances by the largest number of running tasks for this service in the\n\t\t\t\t\tsame Availability Zone as the instance. For example, if zone A has one running service task and\n\t\t\t\t\tzones B and C each have two, container instances in either zone B or C are considered optimal\n\t\t\t\t\tfor termination.

    \n
  • \n
  • \n

    Stop the task on a container instance in an optimal Availability Zone (based on the previous\n\t\t\t\t\tsteps), favoring container instances with the largest number of running tasks for this\n\t\t\t\t\tservice.

    \n
  • \n
\n \n

You must have a service-linked role when you update any of the following service\n\t\t\t\tproperties:

\n
    \n
  • \n

    \n loadBalancers,

    \n
  • \n
  • \n

    \n serviceRegistries\n

    \n
  • \n
\n

For more information about the role see the CreateService request parameter \n role\n .

\n
", + "smithy.api#documentation": "

Modifies the parameters of a service.

\n \n

On March 21, 2024, a change was made to resolve the task definition revision before authorization. When a task definition revision is not specified, authorization will occur using the latest revision of a task definition.

\n
\n

For services using the rolling update (ECS) you can update the desired\n\t\t\tcount, deployment configuration, network configuration, load balancers, service\n\t\t\tregistries, enable ECS managed tags option, propagate tags option, task placement\n\t\t\tconstraints and strategies, and task definition. When you update any of these\n\t\t\tparameters, Amazon ECS starts new tasks with the new configuration.

\n

You can attach Amazon EBS volumes to Amazon ECS tasks by configuring the volume when starting or\n\t\t\trunning a task, or when creating or updating a service. For more information, see Amazon EBS volumes in the Amazon Elastic Container Service Developer Guide. You can update\n\t\t\tyour volume configurations and trigger a new deployment.\n\t\t\t\tvolumeConfigurations is only supported for REPLICA service and not\n\t\t\tDAEMON service. If you leave volumeConfigurations\n null, it doesn't trigger a new deployment. For more information on volumes,\n\t\t\tsee Amazon EBS volumes in the Amazon Elastic Container Service Developer Guide.

\n

For services using the blue/green (CODE_DEPLOY) deployment controller,\n\t\t\tonly the desired count, deployment configuration, health check grace period, task\n\t\t\tplacement constraints and strategies, enable ECS managed tags option, and propagate tags\n\t\t\tcan be updated using this API. If the network configuration, platform version, task\n\t\t\tdefinition, or load balancer need to be updated, create a new CodeDeploy deployment. For more\n\t\t\tinformation, see CreateDeployment in the CodeDeploy API Reference.

\n

For services using an external deployment controller, you can update only the desired\n\t\t\tcount, task placement constraints and strategies, health check grace period, enable ECS\n\t\t\tmanaged tags option, and propagate tags option, using this API. If the launch type, load\n\t\t\tbalancer, network configuration, platform version, or task definition need to be\n\t\t\tupdated, create a new task set. For more information, see CreateTaskSet.

\n

You can add to or subtract from the number of instantiations of a task definition in a\n\t\t\tservice by specifying the cluster that the service is running in and a new\n\t\t\t\tdesiredCount parameter.

\n

You can attach Amazon EBS volumes to Amazon ECS tasks by configuring the volume when starting or\n\t\t\trunning a task, or when creating or updating a service. For more information, see Amazon EBS volumes in the Amazon Elastic Container Service Developer Guide.

\n

If you have updated the container image of your application, you can create a new task\n\t\t\tdefinition with that image and deploy it to your service. The service scheduler uses the\n\t\t\tminimum healthy percent and maximum percent parameters (in the service's deployment\n\t\t\tconfiguration) to determine the deployment strategy.

\n \n

If your updated Docker image uses the same tag as what is in the existing task\n\t\t\t\tdefinition for your service (for example, my_image:latest), you don't\n\t\t\t\tneed to create a new revision of your task definition. You can update the service\n\t\t\t\tusing the forceNewDeployment option. The new tasks launched by the\n\t\t\t\tdeployment pull the current image/tag combination from your repository when they\n\t\t\t\tstart.

\n
\n

You can also update the deployment configuration of a service. When a deployment is\n\t\t\ttriggered by updating the task definition of a service, the service scheduler uses the\n\t\t\tdeployment configuration parameters, minimumHealthyPercent and\n\t\t\t\tmaximumPercent, to determine the deployment strategy.

\n
    \n
  • \n

    If minimumHealthyPercent is below 100%, the scheduler can ignore\n\t\t\t\t\t\tdesiredCount temporarily during a deployment. For example, if\n\t\t\t\t\t\tdesiredCount is four tasks, a minimum of 50% allows the\n\t\t\t\t\tscheduler to stop two existing tasks before starting two new tasks. Tasks for\n\t\t\t\t\tservices that don't use a load balancer are considered healthy if they're in the\n\t\t\t\t\t\tRUNNING state. Tasks for services that use a load balancer are\n\t\t\t\t\tconsidered healthy if they're in the RUNNING state and are reported\n\t\t\t\t\tas healthy by the load balancer.

    \n
  • \n
  • \n

    The maximumPercent parameter represents an upper limit on the\n\t\t\t\t\tnumber of running tasks during a deployment. You can use it to define the\n\t\t\t\t\tdeployment batch size. For example, if desiredCount is four tasks,\n\t\t\t\t\ta maximum of 200% starts four new tasks before stopping the four older tasks\n\t\t\t\t\t(provided that the cluster resources required to do this are available).

    \n
  • \n
\n

When UpdateService\n\t\t\tstops a task during a deployment, the equivalent of docker stop is issued\n\t\t\tto the containers running in the task. This results in a SIGTERM and a\n\t\t\t30-second timeout. After this, SIGKILL is sent and the containers are\n\t\t\tforcibly stopped. If the container handles the SIGTERM gracefully and exits\n\t\t\twithin 30 seconds from receiving it, no SIGKILL is sent.

\n

When the service scheduler launches new tasks, it determines task placement in your\n\t\t\tcluster with the following logic.

\n
    \n
  • \n

    Determine which of the container instances in your cluster can support your\n\t\t\t\t\tservice's task definition. For example, they have the required CPU, memory,\n\t\t\t\t\tports, and container instance attributes.

    \n
  • \n
  • \n

    By default, the service scheduler attempts to balance tasks across\n\t\t\t\t\tAvailability Zones in this manner even though you can choose a different\n\t\t\t\t\tplacement strategy.

    \n
      \n
    • \n

      Sort the valid container instances by the fewest number of running\n\t\t\t\t\t\t\ttasks for this service in the same Availability Zone as the instance.\n\t\t\t\t\t\t\tFor example, if zone A has one running service task and zones B and C\n\t\t\t\t\t\t\teach have zero, valid container instances in either zone B or C are\n\t\t\t\t\t\t\tconsidered optimal for placement.

      \n
    • \n
    • \n

      Place the new service task on a valid container instance in an optimal\n\t\t\t\t\t\t\tAvailability Zone (based on the previous steps), favoring container\n\t\t\t\t\t\t\tinstances with the fewest number of running tasks for this\n\t\t\t\t\t\t\tservice.

      \n
    • \n
    \n
  • \n
\n

When the service scheduler stops running tasks, it attempts to maintain balance across\n\t\t\tthe Availability Zones in your cluster using the following logic:

\n
    \n
  • \n

    Sort the container instances by the largest number of running tasks for this\n\t\t\t\t\tservice in the same Availability Zone as the instance. For example, if zone A\n\t\t\t\t\thas one running service task and zones B and C each have two, container\n\t\t\t\t\tinstances in either zone B or C are considered optimal for termination.

    \n
  • \n
  • \n

    Stop the task on a container instance in an optimal Availability Zone (based\n\t\t\t\t\ton the previous steps), favoring container instances with the largest number of\n\t\t\t\t\trunning tasks for this service.

    \n
  • \n
\n \n

You must have a service-linked role when you update any of the following service\n\t\t\t\tproperties:

\n
    \n
  • \n

    \n loadBalancers,

    \n
  • \n
  • \n

    \n serviceRegistries\n

    \n
  • \n
\n

For more information about the role see the CreateService request\n\t\t\t\tparameter \n role\n .

\n
", "smithy.api#examples": [ { "title": "To change the number of tasks in a service", @@ -13865,7 +13877,7 @@ } ], "traits": { - "smithy.api#documentation": "

Modifies which task set in a service is the primary task set. Any parameters that are updated on the\n\t\t\tprimary task set in a service will transition to the service. This is used when a service uses the\n\t\t\t\tEXTERNAL deployment controller type. For more information, see Amazon ECS Deployment\n\t\t\t\tTypes in the Amazon Elastic Container Service Developer Guide.

" + "smithy.api#documentation": "

Modifies which task set in a service is the primary task set. Any parameters that are\n\t\t\tupdated on the primary task set in a service will transition to the service. This is\n\t\t\tused when a service uses the EXTERNAL deployment controller type. For more\n\t\t\tinformation, see Amazon ECS Deployment\n\t\t\t\tTypes in the Amazon Elastic Container Service Developer Guide.

" } }, "com.amazonaws.ecs#UpdateServicePrimaryTaskSetRequest": { @@ -13874,7 +13886,7 @@ "cluster": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

The short name or full Amazon Resource Name (ARN) of the cluster that hosts the service that the task set exists\n\t\t\tin.

", + "smithy.api#documentation": "

The short name or full Amazon Resource Name (ARN) of the cluster that hosts the service that the task\n\t\t\tset exists in.

", "smithy.api#required": {} } }, @@ -13930,25 +13942,25 @@ "desiredCount": { "target": "com.amazonaws.ecs#BoxedInteger", "traits": { - "smithy.api#documentation": "

The number of instantiations of the task to place and keep running in your service.

" + "smithy.api#documentation": "

The number of instantiations of the task to place and keep running in your\n\t\t\tservice.

" } }, "taskDefinition": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

The family and revision (family:revision) or full ARN of the\n\t\t\ttask definition to run in your service. If a revision is not specified, the latest\n\t\t\t\tACTIVE revision is used. If you modify the task definition with\n\t\t\t\tUpdateService, Amazon ECS spawns a task with the new version of the task definition and\n\t\t\tthen stops an old task after the new version is running.

" + "smithy.api#documentation": "

The family and revision (family:revision) or\n\t\t\tfull ARN of the task definition to run in your service. If a revision is\n\t\t\tnot specified, the latest ACTIVE revision is used. If you modify the task\n\t\t\tdefinition with UpdateService, Amazon ECS spawns a task with the new version of\n\t\t\tthe task definition and then stops an old task after the new version is running.

" } }, "capacityProviderStrategy": { "target": "com.amazonaws.ecs#CapacityProviderStrategy", "traits": { - "smithy.api#documentation": "

The capacity provider strategy to update the service to use.

\n

if the service uses the default capacity provider strategy for the cluster, the service can be\n\t\t\tupdated to use one or more capacity providers as opposed to the default capacity provider strategy.\n\t\t\tHowever, when a service is using a capacity provider strategy that's not the default capacity provider\n\t\t\tstrategy, the service can't be updated to use the cluster's default capacity provider strategy.

\n

A capacity provider strategy consists of one or more capacity providers along with the\n\t\t\t\tbase and weight to assign to them. A capacity provider must be associated\n\t\t\twith the cluster to be used in a capacity provider strategy. The PutClusterCapacityProviders API is used to associate a capacity provider with a cluster.\n\t\t\tOnly capacity providers with an ACTIVE or UPDATING status can be used.

\n

If specifying a capacity provider that uses an Auto Scaling group, the capacity provider must already\n\t\t\tbe created. New capacity providers can be created with the CreateClusterCapacityProvider API operation.

\n

To use a Fargate capacity provider, specify either the FARGATE or\n\t\t\t\tFARGATE_SPOT capacity providers. The Fargate capacity providers are available to all\n\t\t\taccounts and only need to be associated with a cluster to be used.

\n

The PutClusterCapacityProvidersAPI operation is used to update the list of available capacity\n\t\t\tproviders for a cluster after the cluster is created.

\n

" + "smithy.api#documentation": "

The capacity provider strategy to update the service to use.

\n

if the service uses the default capacity provider strategy for the cluster, the\n\t\t\tservice can be updated to use one or more capacity providers as opposed to the default\n\t\t\tcapacity provider strategy. However, when a service is using a capacity provider\n\t\t\tstrategy that's not the default capacity provider strategy, the service can't be updated\n\t\t\tto use the cluster's default capacity provider strategy.

\n

A capacity provider strategy consists of one or more capacity providers along with the\n\t\t\t\tbase and weight to assign to them. A capacity provider\n\t\t\tmust be associated with the cluster to be used in a capacity provider strategy. The\n\t\t\t\tPutClusterCapacityProviders API is used to associate a capacity provider\n\t\t\twith a cluster. Only capacity providers with an ACTIVE or\n\t\t\t\tUPDATING status can be used.

\n

If specifying a capacity provider that uses an Auto Scaling group, the capacity\n\t\t\tprovider must already be created. New capacity providers can be created with the CreateClusterCapacityProvider API operation.

\n

To use a Fargate capacity provider, specify either the FARGATE or\n\t\t\t\tFARGATE_SPOT capacity providers. The Fargate capacity providers are\n\t\t\tavailable to all accounts and only need to be associated with a cluster to be\n\t\t\tused.

\n

The PutClusterCapacityProvidersAPI operation is used to update the list of\n\t\t\tavailable capacity providers for a cluster after the cluster is created.

\n

" } }, "deploymentConfiguration": { "target": "com.amazonaws.ecs#DeploymentConfiguration", "traits": { - "smithy.api#documentation": "

Optional deployment parameters that control how many tasks run during the deployment and the ordering\n\t\t\tof stopping and starting tasks.

" + "smithy.api#documentation": "

Optional deployment parameters that control how many tasks run during the deployment\n\t\t\tand the ordering of stopping and starting tasks.

" } }, "availabilityZoneRebalancing": { @@ -13966,62 +13978,62 @@ "placementConstraints": { "target": "com.amazonaws.ecs#PlacementConstraints", "traits": { - "smithy.api#documentation": "

An array of task placement constraint objects to update the service to use. If no value is specified,\n\t\t\tthe existing placement constraints for the service will remain unchanged. If this value is specified,\n\t\t\tit will override any existing placement constraints defined for the service. To remove all existing\n\t\t\tplacement constraints, specify an empty array.

\n

You can specify a maximum of 10 constraints for each task. This limit includes constraints in the\n\t\t\ttask definition and those specified at runtime.

" + "smithy.api#documentation": "

An array of task placement constraint objects to update the service to use. If no\n\t\t\tvalue is specified, the existing placement constraints for the service will remain\n\t\t\tunchanged. If this value is specified, it will override any existing placement\n\t\t\tconstraints defined for the service. To remove all existing placement constraints,\n\t\t\tspecify an empty array.

\n

You can specify a maximum of 10 constraints for each task. This limit includes\n\t\t\tconstraints in the task definition and those specified at runtime.

" } }, "placementStrategy": { "target": "com.amazonaws.ecs#PlacementStrategies", "traits": { - "smithy.api#documentation": "

The task placement strategy objects to update the service to use. If no value is specified, the\n\t\t\texisting placement strategy for the service will remain unchanged. If this value is specified, it will\n\t\t\toverride the existing placement strategy defined for the service. To remove an existing placement\n\t\t\tstrategy, specify an empty object.

\n

You can specify a maximum of five strategy rules for each service.

" + "smithy.api#documentation": "

The task placement strategy objects to update the service to use. If no value is\n\t\t\tspecified, the existing placement strategy for the service will remain unchanged. If\n\t\t\tthis value is specified, it will override the existing placement strategy defined for\n\t\t\tthe service. To remove an existing placement strategy, specify an empty object.

\n

You can specify a maximum of five strategy rules for each service.

" } }, "platformVersion": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

The platform version that your tasks in the service run on. A platform version is only specified for\n\t\t\ttasks using the Fargate launch type. If a platform version is not specified, the\n\t\t\t\tLATEST platform version is used. For more information, see Fargate Platform Versions in\n\t\t\tthe Amazon Elastic Container Service Developer Guide.

" + "smithy.api#documentation": "

The platform version that your tasks in the service run on. A platform version is only\n\t\t\tspecified for tasks using the Fargate launch type. If a platform version\n\t\t\tis not specified, the LATEST platform version is used. For more\n\t\t\tinformation, see Fargate Platform\n\t\t\t\tVersions in the Amazon Elastic Container Service Developer Guide.

" } }, "forceNewDeployment": { "target": "com.amazonaws.ecs#Boolean", "traits": { "smithy.api#default": false, - "smithy.api#documentation": "

Determines whether to force a new deployment of the service. By default, deployments aren't forced.\n\t\t\tYou can use this option to start a new deployment with no service definition changes. For example, you\n\t\t\tcan update a service's tasks to use a newer Docker image with the same image/tag combination\n\t\t\t\t(my_image:latest) or to roll Fargate tasks onto a newer platform version.

" + "smithy.api#documentation": "

Determines whether to force a new deployment of the service. By default, deployments\n\t\t\taren't forced. You can use this option to start a new deployment with no service\n\t\t\tdefinition changes. For example, you can update a service's tasks to use a newer Docker\n\t\t\timage with the same image/tag combination (my_image:latest) or to roll\n\t\t\tFargate tasks onto a newer platform version.

" } }, "healthCheckGracePeriodSeconds": { "target": "com.amazonaws.ecs#BoxedInteger", "traits": { - "smithy.api#documentation": "

The period of time, in seconds, that the Amazon ECS service scheduler ignores unhealthy Elastic Load Balancing, VPC Lattice, and container \n\t\t\thealth checks after a task has first started. If you don't specify a health check grace\n\t\t\tperiod value, the default value of 0 is used. If you don't use any of the health checks, \n\t\t\tthen healthCheckGracePeriodSeconds is unused.

\n

If your service's tasks take a while to start and respond to health checks, you can specify a\n\t\t\thealth check grace period of up to 2,147,483,647 seconds (about 69 years). During that time, the Amazon ECS\n\t\t\tservice scheduler ignores health check status. This grace period can prevent the service scheduler from\n\t\t\tmarking tasks as unhealthy and stopping them before they have time to come up.

" + "smithy.api#documentation": "

The period of time, in seconds, that the Amazon ECS service scheduler ignores unhealthy\n\t\t\tElastic Load Balancing, VPC Lattice, and container health checks after a task has first started. If you don't\n\t\t\tspecify a health check grace period value, the default value of 0 is used.\n\t\t\tIf you don't use any of the health checks, then\n\t\t\t\thealthCheckGracePeriodSeconds is unused.

\n

If your service's tasks take a while to start and respond to health checks, you can\n\t\t\tspecify a health check grace period of up to 2,147,483,647 seconds (about 69 years).\n\t\t\tDuring that time, the Amazon ECS service scheduler ignores health check status. This grace\n\t\t\tperiod can prevent the service scheduler from marking tasks as unhealthy and stopping\n\t\t\tthem before they have time to come up.

" } }, "enableExecuteCommand": { "target": "com.amazonaws.ecs#BoxedBoolean", "traits": { - "smithy.api#documentation": "

If true, this enables execute command functionality on all task containers.

\n

If you do not want to override the value that was set when the service was created, you can set this\n\t\t\tto null when performing this action.

" + "smithy.api#documentation": "

If true, this enables execute command functionality on all task\n\t\t\tcontainers.

\n

If you do not want to override the value that was set when the service was created,\n\t\t\tyou can set this to null when performing this action.

" } }, "enableECSManagedTags": { "target": "com.amazonaws.ecs#BoxedBoolean", "traits": { - "smithy.api#documentation": "

Determines whether to turn on Amazon ECS managed tags for the tasks in the service. For more information,\n\t\t\tsee Tagging Your\n\t\t\t\tAmazon ECS Resources in the Amazon Elastic Container Service Developer Guide.

\n

Only tasks launched after the update will reflect the update. To update the tags on all tasks, set\n\t\t\t\tforceNewDeployment to true, so that Amazon ECS starts new tasks with the\n\t\t\tupdated tags.

" + "smithy.api#documentation": "

Determines whether to turn on Amazon ECS managed tags for the tasks in the service. For\n\t\t\tmore information, see Tagging Your Amazon ECS\n\t\t\t\tResources in the Amazon Elastic Container Service Developer Guide.

\n

Only tasks launched after the update will reflect the update. To update the tags on\n\t\t\tall tasks, set forceNewDeployment to true, so that Amazon ECS\n\t\t\tstarts new tasks with the updated tags.

" } }, "loadBalancers": { "target": "com.amazonaws.ecs#LoadBalancers", "traits": { - "smithy.api#documentation": "

A list of Elastic Load Balancing load balancer objects. It contains the load balancer name, the container name, and\n\t\t\tthe container port to access from the load balancer. The container name is as it appears in a container\n\t\t\tdefinition.

\n

When you add, update, or remove a load balancer configuration, Amazon ECS starts new tasks with the\n\t\t\tupdated Elastic Load Balancing configuration, and then stops the old tasks when the new tasks are running.

\n

For services that use rolling updates, you can add, update, or remove Elastic Load Balancing target groups. You can\n\t\t\tupdate from a single target group to multiple target groups and from multiple target groups to a single\n\t\t\ttarget group.

\n

For services that use blue/green deployments, you can update Elastic Load Balancing target groups by using\n\t\t\t\t\t\n CreateDeployment\n \n\t\t\tthrough CodeDeploy. Note that multiple target groups are not supported for blue/green deployments. For more\n\t\t\tinformation see Register multiple target\n\t\t\t\tgroups with a service in the Amazon Elastic Container Service Developer Guide.

\n

For services that use the external deployment controller, you can add, update, or remove load\n\t\t\tbalancers by using CreateTaskSet. Note that\n\t\t\tmultiple target groups are not supported for external deployments. For more information see Register multiple target groups with a service in the Amazon Elastic Container Service Developer Guide.

\n

You can remove existing loadBalancers by passing an empty list.

" + "smithy.api#documentation": "

A list of Elastic Load Balancing load balancer objects. It contains the load balancer name, the\n\t\t\tcontainer name, and the container port to access from the load balancer. The container\n\t\t\tname is as it appears in a container definition.

\n

When you add, update, or remove a load balancer configuration, Amazon ECS starts new tasks\n\t\t\twith the updated Elastic Load Balancing configuration, and then stops the old tasks when the new tasks\n\t\t\tare running.

\n

For services that use rolling updates, you can add, update, or remove Elastic Load Balancing target\n\t\t\tgroups. You can update from a single target group to multiple target groups and from\n\t\t\tmultiple target groups to a single target group.

\n

For services that use blue/green deployments, you can update Elastic Load Balancing target groups by\n\t\t\tusing \n CreateDeployment\n through CodeDeploy. Note that multiple target groups\n\t\t\tare not supported for blue/green deployments. For more information see Register\n\t\t\t\tmultiple target groups with a service in the Amazon Elastic Container Service Developer Guide.

\n

For services that use the external deployment controller, you can add, update, or\n\t\t\tremove load balancers by using CreateTaskSet.\n\t\t\tNote that multiple target groups are not supported for external deployments. For more\n\t\t\tinformation see Register\n\t\t\t\tmultiple target groups with a service in the Amazon Elastic Container Service Developer Guide.

\n

You can remove existing loadBalancers by passing an empty list.

" } }, "propagateTags": { "target": "com.amazonaws.ecs#PropagateTags", "traits": { - "smithy.api#documentation": "

Determines whether to propagate the tags from the task definition or the service to the task. If no\n\t\t\tvalue is specified, the tags aren't propagated.

\n

Only tasks launched after the update will reflect the update. To update the tags on all tasks, set\n\t\t\t\tforceNewDeployment to true, so that Amazon ECS starts new tasks with the\n\t\t\tupdated tags.

" + "smithy.api#documentation": "

Determines whether to propagate the tags from the task definition or the service to\n\t\t\tthe task. If no value is specified, the tags aren't propagated.

\n

Only tasks launched after the update will reflect the update. To update the tags on\n\t\t\tall tasks, set forceNewDeployment to true, so that Amazon ECS\n\t\t\tstarts new tasks with the updated tags.

" } }, "serviceRegistries": { "target": "com.amazonaws.ecs#ServiceRegistries", "traits": { - "smithy.api#documentation": "

The details for the service discovery registries to assign to this service. For more information, see\n\t\t\t\tService\n\t\t\t\tDiscovery.

\n

When you add, update, or remove the service registries configuration, Amazon ECS starts new tasks with the\n\t\t\tupdated service registries configuration, and then stops the old tasks when the new tasks are\n\t\t\trunning.

\n

You can remove existing serviceRegistries by passing an empty list.

" + "smithy.api#documentation": "

The details for the service discovery registries to assign to this service. For more\n\t\t\tinformation, see Service\n\t\t\t\tDiscovery.

\n

When you add, update, or remove the service registries configuration, Amazon ECS starts new\n\t\t\ttasks with the updated service registries configuration, and then stops the old tasks\n\t\t\twhen the new tasks are running.

\n

You can remove existing serviceRegistries by passing an empty\n\t\t\tlist.

" } }, "serviceConnectConfiguration": { @@ -14033,13 +14045,13 @@ "volumeConfigurations": { "target": "com.amazonaws.ecs#ServiceVolumeConfigurations", "traits": { - "smithy.api#documentation": "

The details of the volume that was configuredAtLaunch. You can configure the size,\n\t\t\tvolumeType, IOPS, throughput, snapshot and encryption in ServiceManagedEBSVolumeConfiguration. The name of the volume must match the\n\t\t\t\tname from the task definition. If set to null, no new deployment is triggered.\n\t\t\tOtherwise, if this configuration differs from the existing one, it triggers a new deployment.

" + "smithy.api#documentation": "

The details of the volume that was configuredAtLaunch. You can configure\n\t\t\tthe size, volumeType, IOPS, throughput, snapshot and encryption in ServiceManagedEBSVolumeConfiguration. The name of the volume\n\t\t\tmust match the name from the task definition. If set to null, no new\n\t\t\tdeployment is triggered. Otherwise, if this configuration differs from the existing one,\n\t\t\tit triggers a new deployment.

" } }, "vpcLatticeConfigurations": { "target": "com.amazonaws.ecs#VpcLatticeConfigurations", "traits": { - "smithy.api#documentation": "

An object representing the VPC Lattice configuration for the service being updated.

" + "smithy.api#documentation": "

An object representing the VPC Lattice configuration for the service being\n\t\t\tupdated.

" } } }, @@ -14093,7 +14105,7 @@ } ], "traits": { - "smithy.api#documentation": "

Updates the protection status of a task. You can set protectionEnabled to\n\t\t\t\ttrue to protect your task from termination during scale-in events from Service\n\t\t\t\tAutoscaling or deployments.

\n

Task-protection, by default, expires after 2 hours at which point Amazon ECS clears the\n\t\t\t\tprotectionEnabled property making the task eligible for termination by a subsequent\n\t\t\tscale-in event.

\n

You can specify a custom expiration period for task protection from 1 minute to up to 2,880 minutes\n\t\t\t(48 hours). To specify the custom expiration period, set the expiresInMinutes property.\n\t\t\tThe expiresInMinutes property is always reset when you invoke this operation for a task\n\t\t\tthat already has protectionEnabled set to true. You can keep extending the\n\t\t\tprotection expiration period of a task by invoking this operation repeatedly.

\n

To learn more about Amazon ECS task protection, see Task scale-in\n\t\t\t\tprotection in the \n Amazon Elastic Container Service Developer Guide\n .

\n \n

This operation is only supported for tasks belonging to an Amazon ECS service. Invoking this operation\n\t\t\t\tfor a standalone task will result in an TASK_NOT_VALID failure. For more information,\n\t\t\t\tsee API failure reasons.

\n
\n \n

If you prefer to set task protection from within the container, we recommend using the Task scale-in\n\t\t\t\t\tprotection endpoint.

\n
", + "smithy.api#documentation": "

Updates the protection status of a task. You can set protectionEnabled to\n\t\t\t\ttrue to protect your task from termination during scale-in events from\n\t\t\t\tService\n\t\t\t\tAutoscaling or deployments.

\n

Task-protection, by default, expires after 2 hours at which point Amazon ECS clears the\n\t\t\t\tprotectionEnabled property making the task eligible for termination by\n\t\t\ta subsequent scale-in event.

\n

You can specify a custom expiration period for task protection from 1 minute to up to\n\t\t\t2,880 minutes (48 hours). To specify the custom expiration period, set the\n\t\t\t\texpiresInMinutes property. The expiresInMinutes property\n\t\t\tis always reset when you invoke this operation for a task that already has\n\t\t\t\tprotectionEnabled set to true. You can keep extending the\n\t\t\tprotection expiration period of a task by invoking this operation repeatedly.

\n

To learn more about Amazon ECS task protection, see Task scale-in\n\t\t\t\tprotection in the \n Amazon Elastic Container Service Developer Guide\n .

\n \n

This operation is only supported for tasks belonging to an Amazon ECS service. Invoking\n\t\t\t\tthis operation for a standalone task will result in an TASK_NOT_VALID\n\t\t\t\tfailure. For more information, see API failure\n\t\t\t\t\treasons.

\n
\n \n

If you prefer to set task protection from within the container, we recommend using\n\t\t\t\tthe Task scale-in protection endpoint.

\n
", "smithy.api#examples": [ { "title": "To remove task scale-in protection", @@ -14167,7 +14179,7 @@ "cluster": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

The short name or full Amazon Resource Name (ARN) of the cluster that hosts the service that the task sets exist\n\t\t\tin.

", + "smithy.api#documentation": "

The short name or full Amazon Resource Name (ARN) of the cluster that hosts the service that the task\n\t\t\tsets exist in.

", "smithy.api#required": {} } }, @@ -14182,14 +14194,14 @@ "target": "com.amazonaws.ecs#Boolean", "traits": { "smithy.api#default": false, - "smithy.api#documentation": "

Specify true to mark a task for protection and false to unset protection,\n\t\t\tmaking it eligible for termination.

", + "smithy.api#documentation": "

Specify true to mark a task for protection and false to\n\t\t\tunset protection, making it eligible for termination.

", "smithy.api#required": {} } }, "expiresInMinutes": { "target": "com.amazonaws.ecs#BoxedInteger", "traits": { - "smithy.api#documentation": "

If you set protectionEnabled to true, you can specify the duration for task\n\t\t\tprotection in minutes. You can specify a value from 1 minute to up to 2,880 minutes (48 hours). During\n\t\t\tthis time, your task will not be terminated by scale-in events from Service Auto Scaling or\n\t\t\tdeployments. After this time period lapses, protectionEnabled will be reset to\n\t\t\t\tfalse.

\n

If you don’t specify the time, then the task is automatically protected for 120 minutes (2\n\t\t\thours).

" + "smithy.api#documentation": "

If you set protectionEnabled to true, you can specify the\n\t\t\tduration for task protection in minutes. You can specify a value from 1 minute to up to\n\t\t\t2,880 minutes (48 hours). During this time, your task will not be terminated by scale-in\n\t\t\tevents from Service Auto Scaling or deployments. After this time period lapses,\n\t\t\t\tprotectionEnabled will be reset to false.

\n

If you don’t specify the time, then the task is automatically protected for 120\n\t\t\tminutes (2 hours).

" } } }, @@ -14203,7 +14215,7 @@ "protectedTasks": { "target": "com.amazonaws.ecs#ProtectedTasks", "traits": { - "smithy.api#documentation": "

A list of tasks with the following information.

\n
    \n
  • \n

    \n taskArn: The task ARN.

    \n
  • \n
  • \n

    \n protectionEnabled: The protection status of the task. If scale-in protection is\n\t\t\t\t\tturned on for a task, the value is true. Otherwise, it is\n\t\t\t\t\tfalse.

    \n
  • \n
  • \n

    \n expirationDate: The epoch time when protection for the task will expire.

    \n
  • \n
" + "smithy.api#documentation": "

A list of tasks with the following information.

\n
    \n
  • \n

    \n taskArn: The task ARN.

    \n
  • \n
  • \n

    \n protectionEnabled: The protection status of the task. If scale-in\n\t\t\t\t\tprotection is turned on for a task, the value is true. Otherwise,\n\t\t\t\t\tit is false.

    \n
  • \n
  • \n

    \n expirationDate: The epoch time when protection for the task will\n\t\t\t\t\texpire.

    \n
  • \n
" } }, "failures": { @@ -14255,7 +14267,7 @@ } ], "traits": { - "smithy.api#documentation": "

Modifies a task set. This is used when a service uses the EXTERNAL deployment controller\n\t\t\ttype. For more information, see Amazon ECS Deployment Types in the\n\t\t\tAmazon Elastic Container Service Developer Guide.

" + "smithy.api#documentation": "

Modifies a task set. This is used when a service uses the EXTERNAL\n\t\t\tdeployment controller type. For more information, see Amazon ECS Deployment\n\t\t\t\tTypes in the Amazon Elastic Container Service Developer Guide.

" } }, "com.amazonaws.ecs#UpdateTaskSetRequest": { @@ -14264,7 +14276,7 @@ "cluster": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

The short name or full Amazon Resource Name (ARN) of the cluster that hosts the service that the task set is found\n\t\t\tin.

", + "smithy.api#documentation": "

The short name or full Amazon Resource Name (ARN) of the cluster that hosts the service that the task\n\t\t\tset is found in.

", "smithy.api#required": {} } }, @@ -14285,7 +14297,7 @@ "scale": { "target": "com.amazonaws.ecs#Scale", "traits": { - "smithy.api#documentation": "

A floating-point percentage of the desired number of tasks to place and keep running in the task\n\t\t\tset.

", + "smithy.api#documentation": "

A floating-point percentage of the desired number of tasks to place and keep running\n\t\t\tin the task set.

", "smithy.api#required": {} } } @@ -14337,7 +14349,7 @@ "agentHash": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

The Git commit hash for the Amazon ECS container agent build on the amazon-ecs-agent GitHub\n\t\t\trepository.

" + "smithy.api#documentation": "

The Git commit hash for the Amazon ECS container agent build on the amazon-ecs-agent\n\t\t\t GitHub repository.

" } }, "dockerVersion": { @@ -14348,7 +14360,7 @@ } }, "traits": { - "smithy.api#documentation": "

The Docker and Amazon ECS container agent version information about a container instance.

" + "smithy.api#documentation": "

The Docker and Amazon ECS container agent version information about a container\n\t\t\tinstance.

" } }, "com.amazonaws.ecs#Volume": { @@ -14357,42 +14369,42 @@ "name": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

The name of the volume. Up to 255 letters (uppercase and lowercase), numbers, underscores, and hyphens are allowed.

\n

When using a volume configured at launch, the name is required and must also be\n\t\t\tspecified as the volume name in the ServiceVolumeConfiguration or\n\t\t\t\tTaskVolumeConfiguration parameter when creating your service or standalone\n\t\t\ttask.

\n

For all other types of volumes, this name is referenced in the sourceVolume parameter of\n\t\t\tthe mountPoints object in the container definition.

\n

When a volume is using the efsVolumeConfiguration, the name is required.

" + "smithy.api#documentation": "

The name of the volume. Up to 255 letters (uppercase and lowercase), numbers, underscores, and hyphens are allowed.

\n

When using a volume configured at launch, the name is required and must\n\t\t\talso be specified as the volume name in the ServiceVolumeConfiguration or\n\t\t\t\tTaskVolumeConfiguration parameter when creating your service or\n\t\t\tstandalone task.

\n

For all other types of volumes, this name is referenced in the\n\t\t\t\tsourceVolume parameter of the mountPoints object in the\n\t\t\tcontainer definition.

\n

When a volume is using the efsVolumeConfiguration, the name is\n\t\t\trequired.

" } }, "host": { "target": "com.amazonaws.ecs#HostVolumeProperties", "traits": { - "smithy.api#documentation": "

This parameter is specified when you use bind mount host volumes. The contents of the\n\t\t\t\thost parameter determine whether your bind mount host volume persists on the host\n\t\t\tcontainer instance and where it's stored. If the host parameter is empty, then the Docker\n\t\t\tdaemon assigns a host path for your data volume. However, the data isn't guaranteed to persist after\n\t\t\tthe containers that are associated with it stop running.

\n

Windows containers can mount whole directories on the same drive as $env:ProgramData.\n\t\t\tWindows containers can't mount directories on a different drive, and mount point can't be across\n\t\t\tdrives. For example, you can mount C:\\my\\path:C:\\my\\path and D:\\:D:\\, but not\n\t\t\t\tD:\\my\\path:C:\\my\\path or D:\\:C:\\my\\path.

" + "smithy.api#documentation": "

This parameter is specified when you use bind mount host volumes. The contents of the\n\t\t\t\thost parameter determine whether your bind mount host volume persists\n\t\t\ton the host container instance and where it's stored. If the host parameter\n\t\t\tis empty, then the Docker daemon assigns a host path for your data volume. However, the\n\t\t\tdata isn't guaranteed to persist after the containers that are associated with it stop\n\t\t\trunning.

\n

Windows containers can mount whole directories on the same drive as\n\t\t\t\t$env:ProgramData. Windows containers can't mount directories on a\n\t\t\tdifferent drive, and mount point can't be across drives. For example, you can mount\n\t\t\t\tC:\\my\\path:C:\\my\\path and D:\\:D:\\, but not\n\t\t\t\tD:\\my\\path:C:\\my\\path or D:\\:C:\\my\\path.

" } }, "dockerVolumeConfiguration": { "target": "com.amazonaws.ecs#DockerVolumeConfiguration", "traits": { - "smithy.api#documentation": "

This parameter is specified when you use Docker volumes.

\n

Windows containers only support the use of the local driver. To use bind mounts, specify\n\t\t\tthe host parameter instead.

\n \n

Docker volumes aren't supported by tasks run on Fargate.

\n
" + "smithy.api#documentation": "

This parameter is specified when you use Docker volumes.

\n

Windows containers only support the use of the local driver. To use bind\n\t\t\tmounts, specify the host parameter instead.

\n \n

Docker volumes aren't supported by tasks run on Fargate.

\n
" } }, "efsVolumeConfiguration": { "target": "com.amazonaws.ecs#EFSVolumeConfiguration", "traits": { - "smithy.api#documentation": "

This parameter is specified when you use an Amazon Elastic File System file system for task storage.

" + "smithy.api#documentation": "

This parameter is specified when you use an Amazon Elastic File System file system for task\n\t\t\tstorage.

" } }, "fsxWindowsFileServerVolumeConfiguration": { "target": "com.amazonaws.ecs#FSxWindowsFileServerVolumeConfiguration", "traits": { - "smithy.api#documentation": "

This parameter is specified when you use Amazon FSx for Windows File Server file system for task storage.

" + "smithy.api#documentation": "

This parameter is specified when you use Amazon FSx for Windows File Server file system for task\n\t\t\tstorage.

" } }, "configuredAtLaunch": { "target": "com.amazonaws.ecs#BoxedBoolean", "traits": { - "smithy.api#documentation": "

Indicates whether the volume should be configured at launch time. This is used to create Amazon EBS\n\t\t\tvolumes for standalone tasks or tasks created as part of a service. Each task definition revision may\n\t\t\tonly have one volume configured at launch in the volume configuration.

\n

To configure a volume at launch time, use this task definition revision and specify a\n\t\t\t\tvolumeConfigurations object when calling the CreateService,\n\t\t\t\tUpdateService, RunTask or StartTask APIs.

" + "smithy.api#documentation": "

Indicates whether the volume should be configured at launch time. This is used to\n\t\t\tcreate Amazon EBS volumes for standalone tasks or tasks created as part of a service. Each\n\t\t\ttask definition revision may only have one volume configured at launch in the volume\n\t\t\tconfiguration.

\n

To configure a volume at launch time, use this task definition revision and specify a\n\t\t\t\tvolumeConfigurations object when calling the\n\t\t\tCreateService, UpdateService, RunTask or\n\t\t\t\tStartTask APIs.

" } } }, "traits": { - "smithy.api#documentation": "

The data volume configuration for tasks launched using this task definition. Specifying a volume\n\t\t\tconfiguration in a task definition is optional. The volume configuration may contain multiple volumes\n\t\t\tbut only one volume configured at launch is supported. Each volume defined in the volume configuration\n\t\t\tmay only specify a name and one of either configuredAtLaunch,\n\t\t\t\tdockerVolumeConfiguration, efsVolumeConfiguration,\n\t\t\t\tfsxWindowsFileServerVolumeConfiguration, or host. If an empty volume\n\t\t\tconfiguration is specified, by default Amazon ECS uses a host volume. For more information, see Using data\n\t\t\t\tvolumes in tasks.

" + "smithy.api#documentation": "

The data volume configuration for tasks launched using this task definition.\n\t\t\tSpecifying a volume configuration in a task definition is optional. The volume\n\t\t\tconfiguration may contain multiple volumes but only one volume configured at launch is\n\t\t\tsupported. Each volume defined in the volume configuration may only specify a\n\t\t\t\tname and one of either configuredAtLaunch,\n\t\t\t\tdockerVolumeConfiguration, efsVolumeConfiguration,\n\t\t\t\tfsxWindowsFileServerVolumeConfiguration, or host. If an\n\t\t\tempty volume configuration is specified, by default Amazon ECS uses a host volume. For more\n\t\t\tinformation, see Using data volumes in\n\t\t\t\ttasks.

" } }, "com.amazonaws.ecs#VolumeFrom": { @@ -14401,13 +14413,13 @@ "sourceContainer": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

The name of another container within the same task definition to mount volumes from.

" + "smithy.api#documentation": "

The name of another container within the same task definition to mount volumes\n\t\t\tfrom.

" } }, "readOnly": { "target": "com.amazonaws.ecs#BoxedBoolean", "traits": { - "smithy.api#documentation": "

If this value is true, the container has read-only access to the volume. If this value\n\t\t\tis false, then the container can write to the volume. The default value is\n\t\t\t\tfalse.

" + "smithy.api#documentation": "

If this value is true, the container has read-only access to the volume.\n\t\t\tIf this value is false, then the container can write to the volume. The\n\t\t\tdefault value is false.

" } } }, @@ -14433,7 +14445,7 @@ "roleArn": { "target": "com.amazonaws.ecs#IAMRoleArn", "traits": { - "smithy.api#documentation": "

The ARN of the IAM role to associate with this VPC Lattice configuration. This is the Amazon ECS\u2028\n\t\t\tinfrastructure IAM role that is used to manage your VPC Lattice infrastructure.

", + "smithy.api#documentation": "

The ARN of the IAM role to associate with this VPC Lattice configuration. This is the\n\t\t\tAmazon ECS\u2028 infrastructure IAM role that is used to manage your VPC Lattice\n\t\t\tinfrastructure.

", "smithy.api#required": {} } }, @@ -14447,13 +14459,13 @@ "portName": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

The name of the port mapping to register in the VPC Lattice target group. This is the \n\t\t\tname of the portMapping you defined in your task definition.

", + "smithy.api#documentation": "

The name of the port mapping to register in the VPC Lattice target group. This is the name\n\t\t\tof the portMapping you defined in your task definition.

", "smithy.api#required": {} } } }, "traits": { - "smithy.api#documentation": "

The VPC Lattice configuration for your service that holds the information for the target group(s) \n\t\t\tAmazon ECS tasks will be registered to.

" + "smithy.api#documentation": "

The VPC Lattice configuration for your service that holds the information for the target\n\t\t\tgroup(s) Amazon ECS tasks will be registered to.

" } }, "com.amazonaws.ecs#VpcLatticeConfigurations": { diff --git a/models/eks.json b/models/eks.json index 06f60c4a44..d4cc65bd5e 100644 --- a/models/eks.json +++ b/models/eks.json @@ -3843,6 +3843,12 @@ "smithy.api#documentation": "

The node group update configuration.

" } }, + "nodeRepairConfig": { + "target": "com.amazonaws.eks#NodeRepairConfig", + "traits": { + "smithy.api#documentation": "

The node auto repair configuration for the node group.

" + } + }, "capacityType": { "target": "com.amazonaws.eks#CapacityTypes", "traits": { @@ -8400,6 +8406,20 @@ "smithy.api#documentation": "

Information about an Amazon EKS add-on from the Amazon Web Services Marketplace.

" } }, + "com.amazonaws.eks#NodeRepairConfig": { + "type": "structure", + "members": { + "enabled": { + "target": "com.amazonaws.eks#BoxedBoolean", + "traits": { + "smithy.api#documentation": "

Specifies whether to enable node auto repair for the node group. Node auto repair is \n disabled by default.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The node auto repair configuration for the node group.

" + } + }, "com.amazonaws.eks#Nodegroup": { "type": "structure", "members": { @@ -8529,6 +8549,12 @@ "smithy.api#documentation": "

The node group update configuration.

" } }, + "nodeRepairConfig": { + "target": "com.amazonaws.eks#NodeRepairConfig", + "traits": { + "smithy.api#documentation": "

The node auto repair configuration for the node group.

" + } + }, "launchTemplate": { "target": "com.amazonaws.eks#LaunchTemplateSpecification", "traits": { @@ -9383,13 +9409,13 @@ "remoteNodeNetworks": { "target": "com.amazonaws.eks#RemoteNodeNetworkList", "traits": { - "smithy.api#documentation": "

The list of network CIDRs that can contain hybrid nodes.

" + "smithy.api#documentation": "

The list of network CIDRs that can contain hybrid nodes.

\n

These CIDR blocks define the expected IP address range of the hybrid nodes that join\n the cluster. These blocks are typically determined by your network administrator.

\n

Enter one or more IPv4 CIDR blocks in decimal dotted-quad notation (for example, \n 10.2.0.0/16).

\n

It must satisfy the following requirements:

\n
    \n
  • \n

    Each block must be within an IPv4 RFC-1918 network range. Minimum\n allowed size is /24, maximum allowed size is /8. Publicly-routable addresses\n aren't supported.

    \n
  • \n
  • \n

    Each block cannot overlap with the range of the VPC CIDR blocks for your EKS\n resources, or the block of the Kubernetes service IP range.

    \n
  • \n
  • \n

    Each block must have a route to the VPC that uses the VPC CIDR blocks, not\n public IPs or Elastic IPs. There are many options including Transit Gateway,\n Site-to-Site VPN, or Direct Connect.

    \n
  • \n
  • \n

    Each host must allow outbound connection to the EKS cluster control plane on\n TCP ports 443 and 10250.

    \n
  • \n
  • \n

    Each host must allow inbound connection from the EKS cluster control plane on\n TCP port 10250 for logs, exec and port-forward operations.

    \n
  • \n
  • \n

    Each host must allow TCP and UDP network connectivity to and from other hosts\n that are running CoreDNS on UDP port 53 for service and pod DNS\n names.

    \n
  • \n
" } }, "remotePodNetworks": { "target": "com.amazonaws.eks#RemotePodNetworkList", "traits": { - "smithy.api#documentation": "

The list of network CIDRs that can contain pods that run Kubernetes webhooks on hybrid nodes.

" + "smithy.api#documentation": "

The list of network CIDRs that can contain pods that run Kubernetes webhooks on hybrid nodes.

\n

These CIDR blocks are determined by configuring your Container Network Interface (CNI)\n plugin. We recommend the Calico CNI or Cilium CNI. Note that the Amazon VPC CNI plugin for Kubernetes isn't\n available for on-premises and edge locations.

\n

Enter one or more IPv4 CIDR blocks in decimal dotted-quad notation (for example, \n 10.2.0.0/16).

\n

It must satisfy the following requirements:

\n
    \n
  • \n

    Each block must be within an IPv4 RFC-1918 network range. Minimum\n allowed size is /24, maximum allowed size is /8. Publicly-routable addresses\n aren't supported.

    \n
  • \n
  • \n

    Each block cannot overlap with the range of the VPC CIDR blocks for your EKS\n resources, or the block of the Kubernetes service IP range.

    \n
  • \n
" } } }, @@ -9423,12 +9449,12 @@ "cidrs": { "target": "com.amazonaws.eks#StringList", "traits": { - "smithy.api#documentation": "

A network CIDR that can contain hybrid nodes.

" + "smithy.api#documentation": "

A network CIDR that can contain hybrid nodes.

\n

These CIDR blocks define the expected IP address range of the hybrid nodes that join\n the cluster. These blocks are typically determined by your network administrator.

\n

Enter one or more IPv4 CIDR blocks in decimal dotted-quad notation (for example, \n 10.2.0.0/16).

\n

It must satisfy the following requirements:

\n
    \n
  • \n

    Each block must be within an IPv4 RFC-1918 network range. Minimum\n allowed size is /24, maximum allowed size is /8. Publicly-routable addresses\n aren't supported.

    \n
  • \n
  • \n

    Each block cannot overlap with the range of the VPC CIDR blocks for your EKS\n resources, or the block of the Kubernetes service IP range.

    \n
  • \n
  • \n

    Each block must have a route to the VPC that uses the VPC CIDR blocks, not\n public IPs or Elastic IPs. There are many options including Transit Gateway,\n Site-to-Site VPN, or Direct Connect.

    \n
  • \n
  • \n

    Each host must allow outbound connection to the EKS cluster control plane on\n TCP ports 443 and 10250.

    \n
  • \n
  • \n

    Each host must allow inbound connection from the EKS cluster control plane on\n TCP port 10250 for logs, exec and port-forward operations.

    \n
  • \n
  • \n

    Each host must allow TCP and UDP network connectivity to and from other hosts\n that are running CoreDNS on UDP port 53 for service and pod DNS\n names.

    \n
  • \n
" } } }, "traits": { - "smithy.api#documentation": "

A network CIDR that can contain hybrid nodes.

" + "smithy.api#documentation": "

A network CIDR that can contain hybrid nodes.

\n

These CIDR blocks define the expected IP address range of the hybrid nodes that join\n the cluster. These blocks are typically determined by your network administrator.

\n

Enter one or more IPv4 CIDR blocks in decimal dotted-quad notation (for example, \n 10.2.0.0/16).

\n

It must satisfy the following requirements:

\n
    \n
  • \n

    Each block must be within an IPv4 RFC-1918 network range. Minimum\n allowed size is /24, maximum allowed size is /8. Publicly-routable addresses\n aren't supported.

    \n
  • \n
  • \n

    Each block cannot overlap with the range of the VPC CIDR blocks for your EKS\n resources, or the block of the Kubernetes service IP range.

    \n
  • \n
  • \n

    Each block must have a route to the VPC that uses the VPC CIDR blocks, not\n public IPs or Elastic IPs. There are many options including Transit Gateway,\n Site-to-Site VPN, or Direct Connect.

    \n
  • \n
  • \n

    Each host must allow outbound connection to the EKS cluster control plane on\n TCP ports 443 and 10250.

    \n
  • \n
  • \n

    Each host must allow inbound connection from the EKS cluster control plane on\n TCP port 10250 for logs, exec and port-forward operations.

    \n
  • \n
  • \n

    Each host must allow TCP and UDP network connectivity to and from other hosts\n that are running CoreDNS on UDP port 53 for service and pod DNS\n names.

    \n
  • \n
" } }, "com.amazonaws.eks#RemoteNodeNetworkList": { @@ -9449,12 +9475,12 @@ "cidrs": { "target": "com.amazonaws.eks#StringList", "traits": { - "smithy.api#documentation": "

A network CIDR that can contain pods that run Kubernetes webhooks on hybrid nodes.

" + "smithy.api#documentation": "

A network CIDR that can contain pods that run Kubernetes webhooks on hybrid nodes.

\n

These CIDR blocks are determined by configuring your Container Network Interface (CNI)\n plugin. We recommend the Calico CNI or Cilium CNI. Note that the Amazon VPC CNI plugin for Kubernetes isn't\n available for on-premises and edge locations.

\n

Enter one or more IPv4 CIDR blocks in decimal dotted-quad notation (for example, \n 10.2.0.0/16).

\n

It must satisfy the following requirements:

\n
    \n
  • \n

    Each block must be within an IPv4 RFC-1918 network range. Minimum\n allowed size is /24, maximum allowed size is /8. Publicly-routable addresses\n aren't supported.

    \n
  • \n
  • \n

    Each block cannot overlap with the range of the VPC CIDR blocks for your EKS\n resources, or the block of the Kubernetes service IP range.

    \n
  • \n
" } } }, "traits": { - "smithy.api#documentation": "

A network CIDR that can contain pods that run Kubernetes webhooks on hybrid nodes.

" + "smithy.api#documentation": "

A network CIDR that can contain pods that run Kubernetes webhooks on hybrid nodes.

\n

These CIDR blocks are determined by configuring your Container Network Interface (CNI)\n plugin. We recommend the Calico CNI or Cilium CNI. Note that the Amazon VPC CNI plugin for Kubernetes isn't\n available for on-premises and edge locations.

\n

Enter one or more IPv4 CIDR blocks in decimal dotted-quad notation (for example, \n 10.2.0.0/16).

\n

It must satisfy the following requirements:

\n
    \n
  • \n

    Each block must be within an IPv4 RFC-1918 network range. Minimum\n allowed size is /24, maximum allowed size is /8. Publicly-routable addresses\n aren't supported.

    \n
  • \n
  • \n

    Each block cannot overlap with the range of the VPC CIDR blocks for your EKS\n resources, or the block of the Kubernetes service IP range.

    \n
  • \n
" } }, "com.amazonaws.eks#RemotePodNetworkList": { @@ -10614,6 +10640,12 @@ "smithy.api#documentation": "

The node group update configuration.

" } }, + "nodeRepairConfig": { + "target": "com.amazonaws.eks#NodeRepairConfig", + "traits": { + "smithy.api#documentation": "

The node auto repair configuration for the node group.

" + } + }, "clientRequestToken": { "target": "com.amazonaws.eks#String", "traits": { @@ -10902,6 +10934,12 @@ "smithy.api#enumValue": "MaxUnavailablePercentage" } }, + "NODE_REPAIR_ENABLED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "NodeRepairEnabled" + } + }, "CONFIGURATION_VALUES": { "target": "smithy.api#Unit", "traits": { diff --git a/models/emr-serverless.json b/models/emr-serverless.json index 5da81b262f..14a6a9b411 100644 --- a/models/emr-serverless.json +++ b/models/emr-serverless.json @@ -1766,6 +1766,13 @@ "smithy.api#documentation": "

An optimal parameter that indicates the amount of attempts for the job. If not specified,\n this value defaults to the attempt of the latest job.

", "smithy.api#httpQuery": "attempt" } + }, + "accessSystemProfileLogs": { + "target": "smithy.api#Boolean", + "traits": { + "smithy.api#documentation": "

Allows access to system profile logs for Lake Formation-enabled jobs. Default is false.

", + "smithy.api#httpQuery": "accessSystemProfileLogs" + } } } }, @@ -1949,7 +1956,7 @@ "min": 1, "max": 1024 }, - "smithy.api#pattern": "^([a-z0-9]+[a-z0-9-.]*)\\/((?:[a-z0-9]+(?:[._-][a-z0-9]+)*\\/)*[a-z0-9]+(?:[._-][a-z0-9]+)*)(?:\\:([a-zA-Z0-9_][a-zA-Z0-9-._]{0,299})|@(sha256:[0-9a-f]{64}))$" + "smithy.api#pattern": "^([0-9]{12})\\.dkr\\.ecr\\.([a-z0-9-]+).([a-z0-9._-]+)\\/((?:[a-z0-9]+(?:[-._][a-z0-9]+)*/)*[a-z0-9]+(?:[-._][a-z0-9]+)*)(?::([a-zA-Z0-9_]+[a-zA-Z0-9-._]*)|@(sha256:[0-9a-f]{64}))$" } }, "com.amazonaws.emrserverless#InitScriptPath": { diff --git a/models/endpoints/endpoints.json b/models/endpoints/endpoints.json index 1cbdc68d02..1c7d34cccf 100644 --- a/models/endpoints/endpoints.json +++ b/models/endpoints/endpoints.json @@ -2229,6 +2229,12 @@ "tags" : [ "dualstack" ] } ] }, + "ap-southeast-5" : { + "variants" : [ { + "hostname" : "athena.ap-southeast-5.api.aws", + "tags" : [ "dualstack" ] + } ] + }, "ca-central-1" : { "variants" : [ { "hostname" : "athena-fips.ca-central-1.amazonaws.com", @@ -4505,27 +4511,132 @@ }, "cognito-identity" : { "endpoints" : { - "af-south-1" : { }, - "ap-east-1" : { }, - "ap-northeast-1" : { }, - "ap-northeast-2" : { }, - "ap-northeast-3" : { }, - "ap-south-1" : { }, - "ap-south-2" : { }, - "ap-southeast-1" : { }, - "ap-southeast-2" : { }, - "ap-southeast-3" : { }, - "ap-southeast-4" : { }, - "ca-central-1" : { }, - "ca-west-1" : { }, - "eu-central-1" : { }, - "eu-central-2" : { }, - "eu-north-1" : { }, - "eu-south-1" : { }, - "eu-south-2" : { }, - "eu-west-1" : { }, - "eu-west-2" : { }, - "eu-west-3" : { }, + "af-south-1" : { + "variants" : [ { + "hostname" : "cognito-identity.af-south-1.amazonaws.com", + "tags" : [ "dualstack" ] + } ] + }, + "ap-east-1" : { + "variants" : [ { + "hostname" : "cognito-identity.ap-east-1.amazonaws.com", + "tags" : [ "dualstack" ] + } ] + }, + "ap-northeast-1" : { + "variants" : [ { + "hostname" : "cognito-identity.ap-northeast-1.amazonaws.com", + "tags" : [ "dualstack" ] + } ] + }, + 
"ap-northeast-2" : { + "variants" : [ { + "hostname" : "cognito-identity.ap-northeast-2.amazonaws.com", + "tags" : [ "dualstack" ] + } ] + }, + "ap-northeast-3" : { + "variants" : [ { + "hostname" : "cognito-identity.ap-northeast-3.amazonaws.com", + "tags" : [ "dualstack" ] + } ] + }, + "ap-south-1" : { + "variants" : [ { + "hostname" : "cognito-identity.ap-south-1.amazonaws.com", + "tags" : [ "dualstack" ] + } ] + }, + "ap-south-2" : { + "variants" : [ { + "hostname" : "cognito-identity.ap-south-2.amazonaws.com", + "tags" : [ "dualstack" ] + } ] + }, + "ap-southeast-1" : { + "variants" : [ { + "hostname" : "cognito-identity.ap-southeast-1.amazonaws.com", + "tags" : [ "dualstack" ] + } ] + }, + "ap-southeast-2" : { + "variants" : [ { + "hostname" : "cognito-identity.ap-southeast-2.amazonaws.com", + "tags" : [ "dualstack" ] + } ] + }, + "ap-southeast-3" : { + "variants" : [ { + "hostname" : "cognito-identity.ap-southeast-3.amazonaws.com", + "tags" : [ "dualstack" ] + } ] + }, + "ap-southeast-4" : { + "variants" : [ { + "hostname" : "cognito-identity.ap-southeast-4.amazonaws.com", + "tags" : [ "dualstack" ] + } ] + }, + "ca-central-1" : { + "variants" : [ { + "hostname" : "cognito-identity.ca-central-1.amazonaws.com", + "tags" : [ "dualstack" ] + } ] + }, + "ca-west-1" : { + "variants" : [ { + "hostname" : "cognito-identity.ca-west-1.amazonaws.com", + "tags" : [ "dualstack" ] + } ] + }, + "eu-central-1" : { + "variants" : [ { + "hostname" : "cognito-identity.eu-central-1.amazonaws.com", + "tags" : [ "dualstack" ] + } ] + }, + "eu-central-2" : { + "variants" : [ { + "hostname" : "cognito-identity.eu-central-2.amazonaws.com", + "tags" : [ "dualstack" ] + } ] + }, + "eu-north-1" : { + "variants" : [ { + "hostname" : "cognito-identity.eu-north-1.amazonaws.com", + "tags" : [ "dualstack" ] + } ] + }, + "eu-south-1" : { + "variants" : [ { + "hostname" : "cognito-identity.eu-south-1.amazonaws.com", + "tags" : [ "dualstack" ] + } ] + }, + "eu-south-2" : { + "variants" : [ { + 
"hostname" : "cognito-identity.eu-south-2.amazonaws.com", + "tags" : [ "dualstack" ] + } ] + }, + "eu-west-1" : { + "variants" : [ { + "hostname" : "cognito-identity.eu-west-1.amazonaws.com", + "tags" : [ "dualstack" ] + } ] + }, + "eu-west-2" : { + "variants" : [ { + "hostname" : "cognito-identity.eu-west-2.amazonaws.com", + "tags" : [ "dualstack" ] + } ] + }, + "eu-west-3" : { + "variants" : [ { + "hostname" : "cognito-identity.eu-west-3.amazonaws.com", + "tags" : [ "dualstack" ] + } ] + }, "fips-us-east-1" : { "credentialScope" : { "region" : "us-east-1" @@ -4554,32 +4665,76 @@ "deprecated" : true, "hostname" : "cognito-identity-fips.us-west-2.amazonaws.com" }, - "il-central-1" : { }, - "me-central-1" : { }, - "me-south-1" : { }, - "sa-east-1" : { }, + "il-central-1" : { + "variants" : [ { + "hostname" : "cognito-identity.il-central-1.amazonaws.com", + "tags" : [ "dualstack" ] + } ] + }, + "me-central-1" : { + "variants" : [ { + "hostname" : "cognito-identity.me-central-1.amazonaws.com", + "tags" : [ "dualstack" ] + } ] + }, + "me-south-1" : { + "variants" : [ { + "hostname" : "cognito-identity.me-south-1.amazonaws.com", + "tags" : [ "dualstack" ] + } ] + }, + "sa-east-1" : { + "variants" : [ { + "hostname" : "cognito-identity.sa-east-1.amazonaws.com", + "tags" : [ "dualstack" ] + } ] + }, "us-east-1" : { "variants" : [ { + "hostname" : "cognito-identity-fips.us-east-1.amazonaws.com", + "tags" : [ "dualstack", "fips" ] + }, { "hostname" : "cognito-identity-fips.us-east-1.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "cognito-identity.us-east-1.amazonaws.com", + "tags" : [ "dualstack" ] } ] }, "us-east-2" : { "variants" : [ { + "hostname" : "cognito-identity-fips.us-east-2.amazonaws.com", + "tags" : [ "dualstack", "fips" ] + }, { "hostname" : "cognito-identity-fips.us-east-2.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "cognito-identity.us-east-2.amazonaws.com", + "tags" : [ "dualstack" ] } ] }, "us-west-1" : { "variants" : [ { + 
"hostname" : "cognito-identity-fips.us-west-1.amazonaws.com", + "tags" : [ "dualstack", "fips" ] + }, { "hostname" : "cognito-identity-fips.us-west-1.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "cognito-identity.us-west-1.amazonaws.com", + "tags" : [ "dualstack" ] } ] }, "us-west-2" : { "variants" : [ { + "hostname" : "cognito-identity-fips.us-west-2.amazonaws.com", + "tags" : [ "dualstack", "fips" ] + }, { "hostname" : "cognito-identity-fips.us-west-2.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "cognito-identity.us-west-2.amazonaws.com", + "tags" : [ "dualstack" ] } ] } } @@ -6132,12 +6287,18 @@ }, "ca-central-1" : { "variants" : [ { + "hostname" : "dlm-fips.ca-central-1.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { "hostname" : "dlm.ca-central-1.api.aws", "tags" : [ "dualstack" ] } ] }, "ca-west-1" : { "variants" : [ { + "hostname" : "dlm-fips.ca-west-1.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { "hostname" : "dlm.ca-west-1.api.aws", "tags" : [ "dualstack" ] } ] @@ -6216,24 +6377,36 @@ }, "us-east-1" : { "variants" : [ { + "hostname" : "dlm-fips.us-east-1.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { "hostname" : "dlm.us-east-1.api.aws", "tags" : [ "dualstack" ] } ] }, "us-east-2" : { "variants" : [ { + "hostname" : "dlm-fips.us-east-2.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { "hostname" : "dlm.us-east-2.api.aws", "tags" : [ "dualstack" ] } ] }, "us-west-1" : { "variants" : [ { + "hostname" : "dlm-fips.us-west-1.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { "hostname" : "dlm.us-west-1.api.aws", "tags" : [ "dualstack" ] } ] }, "us-west-2" : { "variants" : [ { + "hostname" : "dlm-fips.us-west-2.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { "hostname" : "dlm.us-west-2.api.aws", "tags" : [ "dualstack" ] } ] @@ -10531,37 +10704,81 @@ }, "endpoints" : { "af-south-1" : { - "hostname" : "internetmonitor.af-south-1.api.aws" + "hostname" : "internetmonitor.af-south-1.api.aws", + "variants" : [ { + 
"hostname" : "internetmonitor.af-south-1.api.aws", + "tags" : [ "dualstack" ] + } ] }, "ap-east-1" : { - "hostname" : "internetmonitor.ap-east-1.api.aws" + "hostname" : "internetmonitor.ap-east-1.api.aws", + "variants" : [ { + "hostname" : "internetmonitor.ap-east-1.api.aws", + "tags" : [ "dualstack" ] + } ] }, "ap-northeast-1" : { - "hostname" : "internetmonitor.ap-northeast-1.api.aws" + "hostname" : "internetmonitor.ap-northeast-1.api.aws", + "variants" : [ { + "hostname" : "internetmonitor.ap-northeast-1.api.aws", + "tags" : [ "dualstack" ] + } ] }, "ap-northeast-2" : { - "hostname" : "internetmonitor.ap-northeast-2.api.aws" + "hostname" : "internetmonitor.ap-northeast-2.api.aws", + "variants" : [ { + "hostname" : "internetmonitor.ap-northeast-2.api.aws", + "tags" : [ "dualstack" ] + } ] }, "ap-northeast-3" : { - "hostname" : "internetmonitor.ap-northeast-3.api.aws" + "hostname" : "internetmonitor.ap-northeast-3.api.aws", + "variants" : [ { + "hostname" : "internetmonitor.ap-northeast-3.api.aws", + "tags" : [ "dualstack" ] + } ] }, "ap-south-1" : { - "hostname" : "internetmonitor.ap-south-1.api.aws" + "hostname" : "internetmonitor.ap-south-1.api.aws", + "variants" : [ { + "hostname" : "internetmonitor.ap-south-1.api.aws", + "tags" : [ "dualstack" ] + } ] }, "ap-south-2" : { - "hostname" : "internetmonitor.ap-south-2.api.aws" + "hostname" : "internetmonitor.ap-south-2.api.aws", + "variants" : [ { + "hostname" : "internetmonitor.ap-south-2.api.aws", + "tags" : [ "dualstack" ] + } ] }, "ap-southeast-1" : { - "hostname" : "internetmonitor.ap-southeast-1.api.aws" + "hostname" : "internetmonitor.ap-southeast-1.api.aws", + "variants" : [ { + "hostname" : "internetmonitor.ap-southeast-1.api.aws", + "tags" : [ "dualstack" ] + } ] }, "ap-southeast-2" : { - "hostname" : "internetmonitor.ap-southeast-2.api.aws" + "hostname" : "internetmonitor.ap-southeast-2.api.aws", + "variants" : [ { + "hostname" : "internetmonitor.ap-southeast-2.api.aws", + "tags" : [ "dualstack" ] + } ] 
}, "ap-southeast-3" : { - "hostname" : "internetmonitor.ap-southeast-3.api.aws" + "hostname" : "internetmonitor.ap-southeast-3.api.aws", + "variants" : [ { + "hostname" : "internetmonitor.ap-southeast-3.api.aws", + "tags" : [ "dualstack" ] + } ] }, "ap-southeast-4" : { - "hostname" : "internetmonitor.ap-southeast-4.api.aws" + "hostname" : "internetmonitor.ap-southeast-4.api.aws", + "variants" : [ { + "hostname" : "internetmonitor.ap-southeast-4.api.aws", + "tags" : [ "dualstack" ] + } ] }, "ap-southeast-5" : { "hostname" : "internetmonitor.ap-southeast-5.api.aws" @@ -10571,52 +10788,108 @@ "variants" : [ { "hostname" : "internetmonitor-fips.ca-central-1.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "internetmonitor-fips.ca-central-1.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "internetmonitor.ca-central-1.api.aws", + "tags" : [ "dualstack" ] } ] }, "ca-west-1" : { "hostname" : "internetmonitor.ca-west-1.api.aws" }, "eu-central-1" : { - "hostname" : "internetmonitor.eu-central-1.api.aws" + "hostname" : "internetmonitor.eu-central-1.api.aws", + "variants" : [ { + "hostname" : "internetmonitor.eu-central-1.api.aws", + "tags" : [ "dualstack" ] + } ] }, "eu-central-2" : { - "hostname" : "internetmonitor.eu-central-2.api.aws" + "hostname" : "internetmonitor.eu-central-2.api.aws", + "variants" : [ { + "hostname" : "internetmonitor.eu-central-2.api.aws", + "tags" : [ "dualstack" ] + } ] }, "eu-north-1" : { - "hostname" : "internetmonitor.eu-north-1.api.aws" + "hostname" : "internetmonitor.eu-north-1.api.aws", + "variants" : [ { + "hostname" : "internetmonitor.eu-north-1.api.aws", + "tags" : [ "dualstack" ] + } ] }, "eu-south-1" : { - "hostname" : "internetmonitor.eu-south-1.api.aws" + "hostname" : "internetmonitor.eu-south-1.api.aws", + "variants" : [ { + "hostname" : "internetmonitor.eu-south-1.api.aws", + "tags" : [ "dualstack" ] + } ] }, "eu-south-2" : { - "hostname" : "internetmonitor.eu-south-2.api.aws" + "hostname" : 
"internetmonitor.eu-south-2.api.aws", + "variants" : [ { + "hostname" : "internetmonitor.eu-south-2.api.aws", + "tags" : [ "dualstack" ] + } ] }, "eu-west-1" : { - "hostname" : "internetmonitor.eu-west-1.api.aws" + "hostname" : "internetmonitor.eu-west-1.api.aws", + "variants" : [ { + "hostname" : "internetmonitor.eu-west-1.api.aws", + "tags" : [ "dualstack" ] + } ] }, "eu-west-2" : { - "hostname" : "internetmonitor.eu-west-2.api.aws" + "hostname" : "internetmonitor.eu-west-2.api.aws", + "variants" : [ { + "hostname" : "internetmonitor.eu-west-2.api.aws", + "tags" : [ "dualstack" ] + } ] }, "eu-west-3" : { - "hostname" : "internetmonitor.eu-west-3.api.aws" + "hostname" : "internetmonitor.eu-west-3.api.aws", + "variants" : [ { + "hostname" : "internetmonitor.eu-west-3.api.aws", + "tags" : [ "dualstack" ] + } ] }, "il-central-1" : { "hostname" : "internetmonitor.il-central-1.api.aws" }, "me-central-1" : { - "hostname" : "internetmonitor.me-central-1.api.aws" + "hostname" : "internetmonitor.me-central-1.api.aws", + "variants" : [ { + "hostname" : "internetmonitor.me-central-1.api.aws", + "tags" : [ "dualstack" ] + } ] }, "me-south-1" : { - "hostname" : "internetmonitor.me-south-1.api.aws" + "hostname" : "internetmonitor.me-south-1.api.aws", + "variants" : [ { + "hostname" : "internetmonitor.me-south-1.api.aws", + "tags" : [ "dualstack" ] + } ] }, "sa-east-1" : { - "hostname" : "internetmonitor.sa-east-1.api.aws" + "hostname" : "internetmonitor.sa-east-1.api.aws", + "variants" : [ { + "hostname" : "internetmonitor.sa-east-1.api.aws", + "tags" : [ "dualstack" ] + } ] }, "us-east-1" : { "hostname" : "internetmonitor.us-east-1.api.aws", "variants" : [ { "hostname" : "internetmonitor-fips.us-east-1.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "internetmonitor-fips.us-east-1.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "internetmonitor.us-east-1.api.aws", + "tags" : [ "dualstack" ] } ] }, "us-east-2" : { @@ -10624,6 +10897,12 @@ "variants" 
: [ { "hostname" : "internetmonitor-fips.us-east-2.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "internetmonitor-fips.us-east-2.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "internetmonitor.us-east-2.api.aws", + "tags" : [ "dualstack" ] } ] }, "us-west-1" : { @@ -10631,6 +10910,12 @@ "variants" : [ { "hostname" : "internetmonitor-fips.us-west-1.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "internetmonitor-fips.us-west-1.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "internetmonitor.us-west-1.api.aws", + "tags" : [ "dualstack" ] } ] }, "us-west-2" : { @@ -10638,6 +10923,12 @@ "variants" : [ { "hostname" : "internetmonitor-fips.us-west-2.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "internetmonitor-fips.us-west-2.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "internetmonitor.us-west-2.api.aws", + "tags" : [ "dualstack" ] } ] } } @@ -12225,28 +12516,138 @@ }, "lakeformation" : { "endpoints" : { - "af-south-1" : { }, - "ap-east-1" : { }, - "ap-northeast-1" : { }, - "ap-northeast-2" : { }, - "ap-northeast-3" : { }, - "ap-south-1" : { }, - "ap-south-2" : { }, - "ap-southeast-1" : { }, - "ap-southeast-2" : { }, - "ap-southeast-3" : { }, - "ap-southeast-4" : { }, - "ap-southeast-5" : { }, - "ca-central-1" : { }, - "ca-west-1" : { }, - "eu-central-1" : { }, - "eu-central-2" : { }, - "eu-north-1" : { }, - "eu-south-1" : { }, - "eu-south-2" : { }, - "eu-west-1" : { }, - "eu-west-2" : { }, - "eu-west-3" : { }, + "af-south-1" : { + "variants" : [ { + "hostname" : "lakeformation.af-south-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-east-1" : { + "variants" : [ { + "hostname" : "lakeformation.ap-east-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-northeast-1" : { + "variants" : [ { + "hostname" : "lakeformation.ap-northeast-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-northeast-2" : { + "variants" : [ { + "hostname" : 
"lakeformation.ap-northeast-2.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-northeast-3" : { + "variants" : [ { + "hostname" : "lakeformation.ap-northeast-3.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-south-1" : { + "variants" : [ { + "hostname" : "lakeformation.ap-south-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-south-2" : { + "variants" : [ { + "hostname" : "lakeformation.ap-south-2.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-southeast-1" : { + "variants" : [ { + "hostname" : "lakeformation.ap-southeast-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-southeast-2" : { + "variants" : [ { + "hostname" : "lakeformation.ap-southeast-2.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-southeast-3" : { + "variants" : [ { + "hostname" : "lakeformation.ap-southeast-3.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-southeast-4" : { + "variants" : [ { + "hostname" : "lakeformation.ap-southeast-4.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-southeast-5" : { + "variants" : [ { + "hostname" : "lakeformation.ap-southeast-5.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ca-central-1" : { + "variants" : [ { + "hostname" : "lakeformation.ca-central-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ca-west-1" : { + "variants" : [ { + "hostname" : "lakeformation.ca-west-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-central-1" : { + "variants" : [ { + "hostname" : "lakeformation.eu-central-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-central-2" : { + "variants" : [ { + "hostname" : "lakeformation.eu-central-2.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-north-1" : { + "variants" : [ { + "hostname" : "lakeformation.eu-north-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-south-1" : { + "variants" : [ { + "hostname" : "lakeformation.eu-south-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-south-2" : { + "variants" : [ { + "hostname" : 
"lakeformation.eu-south-2.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-west-1" : { + "variants" : [ { + "hostname" : "lakeformation.eu-west-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-west-2" : { + "variants" : [ { + "hostname" : "lakeformation.eu-west-2.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-west-3" : { + "variants" : [ { + "hostname" : "lakeformation.eu-west-3.api.aws", + "tags" : [ "dualstack" ] + } ] + }, "fips-us-east-1" : { "credentialScope" : { "region" : "us-east-1" @@ -12275,32 +12676,76 @@ "deprecated" : true, "hostname" : "lakeformation-fips.us-west-2.amazonaws.com" }, - "il-central-1" : { }, - "me-central-1" : { }, - "me-south-1" : { }, - "sa-east-1" : { }, + "il-central-1" : { + "variants" : [ { + "hostname" : "lakeformation.il-central-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "me-central-1" : { + "variants" : [ { + "hostname" : "lakeformation.me-central-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "me-south-1" : { + "variants" : [ { + "hostname" : "lakeformation.me-south-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "sa-east-1" : { + "variants" : [ { + "hostname" : "lakeformation.sa-east-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, "us-east-1" : { "variants" : [ { "hostname" : "lakeformation-fips.us-east-1.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "lakeformation-fips.us-east-1.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "lakeformation.us-east-1.api.aws", + "tags" : [ "dualstack" ] } ] }, "us-east-2" : { "variants" : [ { "hostname" : "lakeformation-fips.us-east-2.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "lakeformation-fips.us-east-2.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "lakeformation.us-east-2.api.aws", + "tags" : [ "dualstack" ] } ] }, "us-west-1" : { "variants" : [ { "hostname" : "lakeformation-fips.us-west-1.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : 
"lakeformation-fips.us-west-1.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "lakeformation.us-west-1.api.aws", + "tags" : [ "dualstack" ] } ] }, "us-west-2" : { "variants" : [ { "hostname" : "lakeformation-fips.us-west-2.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "lakeformation-fips.us-west-2.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "lakeformation.us-west-2.api.aws", + "tags" : [ "dualstack" ] } ] } } @@ -14236,6 +14681,7 @@ "ap-southeast-2" : { }, "ap-southeast-3" : { }, "ap-southeast-4" : { }, + "ap-southeast-5" : { }, "ca-central-1" : { "variants" : [ { "hostname" : "network-firewall-fips.ca-central-1.amazonaws.com", @@ -21008,34 +21454,8 @@ "ap-southeast-3" : { }, "ap-southeast-4" : { }, "ap-southeast-5" : { }, - "ca-central-1" : { - "variants" : [ { - "hostname" : "streams.dynamodb-fips.ca-central-1.amazonaws.com", - "tags" : [ "fips" ] - } ] - }, - "ca-central-1-fips" : { - "credentialScope" : { - "region" : "ca-central-1" - }, - "deprecated" : true, - "hostname" : "streams.dynamodb-fips.ca-central-1.amazonaws.com", - "protocols" : [ "https" ] - }, - "ca-west-1" : { - "variants" : [ { - "hostname" : "streams.dynamodb-fips.ca-west-1.amazonaws.com", - "tags" : [ "fips" ] - } ] - }, - "ca-west-1-fips" : { - "credentialScope" : { - "region" : "ca-west-1" - }, - "deprecated" : true, - "hostname" : "streams.dynamodb-fips.ca-west-1.amazonaws.com", - "protocols" : [ "https" ] - }, + "ca-central-1" : { }, + "ca-west-1" : { }, "eu-central-1" : { }, "eu-central-2" : { }, "eu-north-1" : { }, @@ -21055,62 +21475,10 @@ "me-central-1" : { }, "me-south-1" : { }, "sa-east-1" : { }, - "us-east-1" : { - "variants" : [ { - "hostname" : "streams.dynamodb-fips.us-east-1.amazonaws.com", - "tags" : [ "fips" ] - } ] - }, - "us-east-1-fips" : { - "credentialScope" : { - "region" : "us-east-1" - }, - "deprecated" : true, - "hostname" : "streams.dynamodb-fips.us-east-1.amazonaws.com", - "protocols" : [ "https" ] - }, - 
"us-east-2" : { - "variants" : [ { - "hostname" : "streams.dynamodb-fips.us-east-2.amazonaws.com", - "tags" : [ "fips" ] - } ] - }, - "us-east-2-fips" : { - "credentialScope" : { - "region" : "us-east-2" - }, - "deprecated" : true, - "hostname" : "streams.dynamodb-fips.us-east-2.amazonaws.com", - "protocols" : [ "https" ] - }, - "us-west-1" : { - "variants" : [ { - "hostname" : "streams.dynamodb-fips.us-west-1.amazonaws.com", - "tags" : [ "fips" ] - } ] - }, - "us-west-1-fips" : { - "credentialScope" : { - "region" : "us-west-1" - }, - "deprecated" : true, - "hostname" : "streams.dynamodb-fips.us-west-1.amazonaws.com", - "protocols" : [ "https" ] - }, - "us-west-2" : { - "variants" : [ { - "hostname" : "streams.dynamodb-fips.us-west-2.amazonaws.com", - "tags" : [ "fips" ] - } ] - }, - "us-west-2-fips" : { - "credentialScope" : { - "region" : "us-west-2" - }, - "deprecated" : true, - "hostname" : "streams.dynamodb-fips.us-west-2.amazonaws.com", - "protocols" : [ "https" ] - } + "us-east-1" : { }, + "us-east-2" : { }, + "us-west-1" : { }, + "us-west-2" : { } } }, "sts" : { @@ -21989,6 +22357,28 @@ } } }, + "trustedadvisor" : { + "endpoints" : { + "fips-us-east-1" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "hostname" : "trustedadvisor-fips.us-east-1.api.aws" + }, + "fips-us-east-2" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "hostname" : "trustedadvisor-fips.us-east-2.api.aws" + }, + "fips-us-west-2" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "hostname" : "trustedadvisor-fips.us-west-2.api.aws" + } + } + }, "verifiedpermissions" : { "endpoints" : { "af-south-1" : { }, @@ -22204,11 +22594,14 @@ "ap-southeast-1" : { }, "ap-southeast-2" : { }, "ap-southeast-3" : { }, + "ap-southeast-4" : { }, "ca-central-1" : { }, + "ca-west-1" : { }, "eu-central-1" : { }, "eu-central-2" : { }, "eu-north-1" : { }, "eu-south-1" : { }, + "eu-south-2" : { }, "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, @@ -23840,7 +24233,12 @@ }, 
"cognito-identity" : { "endpoints" : { - "cn-north-1" : { } + "cn-north-1" : { + "variants" : [ { + "hostname" : "cognito-identity.cn-north-1.amazonaws.com.cn", + "tags" : [ "dualstack" ] + } ] + } } }, "compute-optimizer" : { @@ -24387,8 +24785,18 @@ }, "lakeformation" : { "endpoints" : { - "cn-north-1" : { }, - "cn-northwest-1" : { } + "cn-north-1" : { + "variants" : [ { + "hostname" : "lakeformation.cn-north-1.api.amazonwebservices.com.cn", + "tags" : [ "dualstack" ] + } ] + }, + "cn-northwest-1" : { + "variants" : [ { + "hostname" : "lakeformation.cn-northwest-1.api.amazonwebservices.com.cn", + "tags" : [ "dualstack" ] + } ] + } } }, "lambda" : { @@ -26118,8 +26526,14 @@ }, "us-gov-west-1" : { "variants" : [ { + "hostname" : "cognito-identity-fips.us-gov-west-1.amazonaws.com", + "tags" : [ "dualstack", "fips" ] + }, { "hostname" : "cognito-identity-fips.us-gov-west-1.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "cognito-identity.us-gov-west-1.amazonaws.com", + "tags" : [ "dualstack" ] } ] } } @@ -29880,34 +30294,8 @@ } ] }, "endpoints" : { - "us-gov-east-1" : { - "variants" : [ { - "hostname" : "streams.dynamodb-fips.us-gov-east-1.amazonaws.com", - "tags" : [ "fips" ] - } ] - }, - "us-gov-east-1-fips" : { - "credentialScope" : { - "region" : "us-gov-east-1" - }, - "deprecated" : true, - "hostname" : "streams.dynamodb-fips.us-gov-east-1.amazonaws.com", - "protocols" : [ "https" ] - }, - "us-gov-west-1" : { - "variants" : [ { - "hostname" : "streams.dynamodb-fips.us-gov-west-1.amazonaws.com", - "tags" : [ "fips" ] - } ] - }, - "us-gov-west-1-fips" : { - "credentialScope" : { - "region" : "us-gov-west-1" - }, - "deprecated" : true, - "hostname" : "streams.dynamodb-fips.us-gov-west-1.amazonaws.com", - "protocols" : [ "https" ] - } + "us-gov-east-1" : { }, + "us-gov-west-1" : { } } }, "sts" : { @@ -30404,6 +30792,23 @@ } }, "services" : { + "agreement-marketplace" : { + "endpoints" : { + "fips-us-iso-east-1" : { + "credentialScope" : { + "region" : 
"us-iso-east-1" + }, + "deprecated" : true, + "hostname" : "agreement-marketplace-fips.us-iso-east-1.c2s.ic.gov" + }, + "us-iso-east-1" : { + "variants" : [ { + "hostname" : "agreement-marketplace-fips.us-iso-east-1.c2s.ic.gov", + "tags" : [ "fips" ] + } ] + } + } + }, "api.ecr" : { "endpoints" : { "us-iso-east-1" : { @@ -30481,6 +30886,23 @@ "us-iso-west-1" : { } } }, + "bedrock" : { + "endpoints" : { + "bedrock-runtime-us-iso-east-1" : { + "credentialScope" : { + "region" : "us-iso-east-1" + }, + "hostname" : "bedrock-runtime.us-iso-east-1.c2s.ic.gov" + }, + "bedrock-us-iso-east-1" : { + "credentialScope" : { + "region" : "us-iso-east-1" + }, + "hostname" : "bedrock.us-iso-east-1.c2s.ic.gov" + }, + "us-iso-east-1" : { } + } + }, "cloudcontrolapi" : { "endpoints" : { "us-iso-east-1" : { }, @@ -30523,6 +30945,12 @@ } } }, + "codebuild" : { + "endpoints" : { + "us-iso-east-1" : { }, + "us-iso-west-1" : { } + } + }, "codedeploy" : { "endpoints" : { "us-iso-east-1" : { }, @@ -31202,6 +31630,12 @@ } } }, + "scheduler" : { + "endpoints" : { + "us-iso-east-1" : { }, + "us-iso-west-1" : { } + } + }, "secretsmanager" : { "endpoints" : { "us-iso-east-1" : { }, @@ -31249,34 +31683,8 @@ } }, "endpoints" : { - "us-iso-east-1" : { - "variants" : [ { - "hostname" : "streams.dynamodb-fips.us-iso-east-1.c2s.ic.gov", - "tags" : [ "fips" ] - } ] - }, - "us-iso-east-1-fips" : { - "credentialScope" : { - "region" : "us-iso-east-1" - }, - "deprecated" : true, - "hostname" : "streams.dynamodb-fips.us-iso-east-1.c2s.ic.gov", - "protocols" : [ "https" ] - }, - "us-iso-west-1" : { - "variants" : [ { - "hostname" : "streams.dynamodb-fips.us-iso-west-1.c2s.ic.gov", - "tags" : [ "fips" ] - } ] - }, - "us-iso-west-1-fips" : { - "credentialScope" : { - "region" : "us-iso-west-1" - }, - "deprecated" : true, - "hostname" : "streams.dynamodb-fips.us-iso-west-1.c2s.ic.gov", - "protocols" : [ "https" ] - } + "us-iso-east-1" : { }, + "us-iso-west-1" : { } } }, "sts" : { @@ -31455,13 +31863,21 @@ }, 
"budgets" : { "endpoints" : { + "aws-iso-b-global" : { + "credentialScope" : { + "region" : "us-isob-east-1" + }, + "hostname" : "budgets.global.sc2s.sgov.gov" + }, "us-isob-east-1" : { "credentialScope" : { "region" : "us-isob-east-1" }, - "hostname" : "budgets.us-isob-east-1.sc2s.sgov.gov" + "hostname" : "budgets.global.sc2s.sgov.gov" } - } + }, + "isRegionalized" : false, + "partitionEndpoint" : "aws-iso-b-global" }, "cloudcontrolapi" : { "endpoints" : { @@ -31763,6 +32179,18 @@ "us-isob-east-1" : { } } }, + "organizations" : { + "endpoints" : { + "aws-iso-b-global" : { + "credentialScope" : { + "region" : "us-isob-east-1" + }, + "hostname" : "organizations.us-isob-east-1.sc2s.sgov.gov" + } + }, + "isRegionalized" : false, + "partitionEndpoint" : "aws-iso-b-global" + }, "outposts" : { "endpoints" : { "us-isob-east-1" : { } @@ -31920,6 +32348,11 @@ } } }, + "scheduler" : { + "endpoints" : { + "us-isob-east-1" : { } + } + }, "secretsmanager" : { "endpoints" : { "us-isob-east-1" : { } @@ -31989,20 +32422,7 @@ "protocols" : [ "http", "https" ] }, "endpoints" : { - "us-isob-east-1" : { - "variants" : [ { - "hostname" : "streams.dynamodb-fips.us-isob-east-1.sc2s.sgov.gov", - "tags" : [ "fips" ] - } ] - }, - "us-isob-east-1-fips" : { - "credentialScope" : { - "region" : "us-isob-east-1" - }, - "deprecated" : true, - "hostname" : "streams.dynamodb-fips.us-isob-east-1.sc2s.sgov.gov", - "protocols" : [ "https" ] - } + "us-isob-east-1" : { } } }, "sts" : { diff --git a/models/finspace.json b/models/finspace.json index 14533a3113..0d850b0733 100644 --- a/models/finspace.json +++ b/models/finspace.json @@ -5723,7 +5723,7 @@ "min": 1, "max": 1024 }, - "smithy.api#pattern": "^[a-zA-Z0-9_:./,]+$" + "smithy.api#pattern": "^[a-zA-Z0-9_:./,; ]+$" } }, "com.amazonaws.finspace#KxCommandLineArguments": { diff --git a/models/glue.json b/models/glue.json index b8a441b21e..37d25bc7bd 100644 --- a/models/glue.json +++ b/models/glue.json @@ -10749,7 +10749,7 @@ "WorkerType": { "target": 
"com.amazonaws.glue#WorkerType", "traits": { - "smithy.api#documentation": "

The type of predefined worker that is allocated when a job runs. Accepts a value of\n G.1X, G.2X, G.4X, G.8X or G.025X for Spark jobs. Accepts the value Z.2X for Ray jobs.

\n
    \n
  • \n

    For the G.1X worker type, each worker maps to 1 DPU (4 vCPUs, 16 GB of memory) with 84GB disk (approximately 34GB free), and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offers a scalable and cost effective way to run most jobs.

    \n
  • \n
  • \n

    For the G.2X worker type, each worker maps to 2 DPU (8 vCPUs, 32 GB of memory) with 128GB disk (approximately 77GB free), and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offers a scalable and cost effective way to run most jobs.

    \n
  • \n
  • \n

    For the G.4X worker type, each worker maps to 4 DPU (16 vCPUs, 64 GB of memory) with 256GB disk (approximately 235GB free), and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs in the following Amazon Web Services Regions: US East (Ohio), US East (N. Virginia), US West (Oregon), Asia Pacific (Singapore), Asia Pacific (Sydney), Asia Pacific (Tokyo), Canada (Central), Europe (Frankfurt), Europe (Ireland), and Europe (Stockholm).

    \n
  • \n
  • \n

    For the G.8X worker type, each worker maps to 8 DPU (32 vCPUs, 128 GB of memory) with 512GB disk (approximately 487GB free), and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs, in the same Amazon Web Services Regions as supported for the G.4X worker type.

    \n
  • \n
  • \n

    For the G.025X worker type, each worker maps to 0.25 DPU (2 vCPUs, 4 GB of memory) with 84GB disk (approximately 34GB free), and provides 1 executor per worker. We recommend this worker type for low volume streaming jobs. This worker type is only available for Glue version 3.0 streaming jobs.

    \n
  • \n
  • \n

    For the Z.2X worker type, each worker maps to 2 M-DPU (8vCPUs, 64 GB of memory) with 128 GB disk (approximately 120GB free), and provides up to 8 Ray workers based on the autoscaler.

    \n
  • \n
" + "smithy.api#documentation": "

The type of predefined worker that is allocated when a job runs. Accepts a value of\n G.1X, G.2X, G.4X, G.8X or G.025X for Spark jobs. Accepts the value Z.2X for Ray jobs.

\n
    \n
  • \n

    For the G.1X worker type, each worker maps to 1 DPU (4 vCPUs, 16 GB of memory) with 94GB disk, and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, as it offers a scalable and cost-effective way to run most jobs.

    \n
  • \n
  • \n

    For the G.2X worker type, each worker maps to 2 DPU (8 vCPUs, 32 GB of memory) with 138GB disk, and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, as it offers a scalable and cost-effective way to run most jobs.

    \n
  • \n
  • \n

    For the G.4X worker type, each worker maps to 4 DPU (16 vCPUs, 64 GB of memory) with 256GB disk, and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs in the following Amazon Web Services Regions: US East (Ohio), US East (N. Virginia), US West (Oregon), Asia Pacific (Singapore), Asia Pacific (Sydney), Asia Pacific (Tokyo), Canada (Central), Europe (Frankfurt), Europe (Ireland), and Europe (Stockholm).

    \n
  • \n
  • \n

    For the G.8X worker type, each worker maps to 8 DPU (32 vCPUs, 128 GB of memory) with 512GB disk, and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs, in the same Amazon Web Services Regions as supported for the G.4X worker type.

    \n
  • \n
  • \n

    For the G.025X worker type, each worker maps to 0.25 DPU (2 vCPUs, 4 GB of memory) with 84GB disk, and provides 1 executor per worker. We recommend this worker type for low volume streaming jobs. This worker type is only available for Glue version 3.0 or later streaming jobs.

    \n
  • \n
  • \n

    For the Z.2X worker type, each worker maps to 2 M-DPU (8 vCPUs, 64 GB of memory) with 128 GB disk, and provides up to 8 Ray workers based on the autoscaler.

    \n
  • \n
" } }, "CodeGenConfigurationNodes": { @@ -11627,7 +11627,7 @@ "WorkerType": { "target": "com.amazonaws.glue#WorkerType", "traits": { - "smithy.api#documentation": "

The type of predefined worker that is allocated when a job runs. Accepts a value of\n G.1X, G.2X, G.4X, or G.8X for Spark jobs. Accepts the value Z.2X for Ray notebooks.

\n
    \n
  • \n

    For the G.1X worker type, each worker maps to 1 DPU (4 vCPUs, 16 GB of memory) with 84GB disk (approximately 34GB free), and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offers a scalable and cost effective way to run most jobs.

    \n
  • \n
  • \n

    For the G.2X worker type, each worker maps to 2 DPU (8 vCPUs, 32 GB of memory) with 128GB disk (approximately 77GB free), and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offers a scalable and cost effective way to run most jobs.

    \n
  • \n
  • \n

    For the G.4X worker type, each worker maps to 4 DPU (16 vCPUs, 64 GB of memory) with 256GB disk (approximately 235GB free), and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs in the following Amazon Web Services Regions: US East (Ohio), US East (N. Virginia), US West (Oregon), Asia Pacific (Singapore), Asia Pacific (Sydney), Asia Pacific (Tokyo), Canada (Central), Europe (Frankfurt), Europe (Ireland), and Europe (Stockholm).

    \n
  • \n
  • \n

    For the G.8X worker type, each worker maps to 8 DPU (32 vCPUs, 128 GB of memory) with 512GB disk (approximately 487GB free), and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs, in the same Amazon Web Services Regions as supported for the G.4X worker type.

    \n
  • \n
  • \n

    For the Z.2X worker type, each worker maps to 2 M-DPU (8vCPUs, 64 GB of memory) with 128 GB disk (approximately 120GB free), and provides up to 8 Ray workers based on the autoscaler.

    \n
  • \n
" + "smithy.api#documentation": "

The type of predefined worker that is allocated when a job runs. Accepts a value of\n G.1X, G.2X, G.4X, or G.8X for Spark jobs. Accepts the value Z.2X for Ray notebooks.

\n
    \n
  • \n

    For the G.1X worker type, each worker maps to 1 DPU (4 vCPUs, 16 GB of memory) with 94GB disk, and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, as it offers a scalable and cost-effective way to run most jobs.

    \n
  • \n
  • \n

    For the G.2X worker type, each worker maps to 2 DPU (8 vCPUs, 32 GB of memory) with 138GB disk, and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, as it offers a scalable and cost-effective way to run most jobs.

    \n
  • \n
  • \n

    For the G.4X worker type, each worker maps to 4 DPU (16 vCPUs, 64 GB of memory) with 256GB disk, and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs in the following Amazon Web Services Regions: US East (Ohio), US East (N. Virginia), US West (Oregon), Asia Pacific (Singapore), Asia Pacific (Sydney), Asia Pacific (Tokyo), Canada (Central), Europe (Frankfurt), Europe (Ireland), and Europe (Stockholm).

    \n
  • \n
  • \n

    For the G.8X worker type, each worker maps to 8 DPU (32 vCPUs, 128 GB of memory) with 512GB disk, and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs, in the same Amazon Web Services Regions as supported for the G.4X worker type.

    \n
  • \n
  • \n

    For the Z.2X worker type, each worker maps to 2 M-DPU (8 vCPUs, 64 GB of memory) with 128 GB disk, and provides up to 8 Ray workers based on the autoscaler.

    \n
  • \n
" } }, "SecurityConfiguration": { @@ -11894,7 +11894,7 @@ } ], "traits": { - "smithy.api#documentation": "

Creates a new trigger.

" + "smithy.api#documentation": "

Creates a new trigger.

\n

Job arguments may be logged. Do not pass plaintext secrets as arguments. Retrieve secrets from a Glue Connection, Amazon Web Services Secrets Manager or other secret management mechanism if you intend to keep them within the Job.

" } }, "com.amazonaws.glue#CreateTriggerRequest": { @@ -12184,7 +12184,7 @@ "DefaultRunProperties": { "target": "com.amazonaws.glue#WorkflowRunProperties", "traits": { - "smithy.api#documentation": "

A collection of properties to be used as part of each execution of the workflow.

" + "smithy.api#documentation": "

A collection of properties to be used as part of each execution of the workflow.

\n

Run properties may be logged. Do not pass plaintext secrets as properties. Retrieve secrets from a Glue Connection, Amazon Web Services Secrets Manager or other secret management mechanism if you intend to use them within the workflow run.

" } }, "Tags": { @@ -12915,6 +12915,43 @@ } } }, + "com.amazonaws.glue#DataQualityEncryption": { + "type": "structure", + "members": { + "DataQualityEncryptionMode": { + "target": "com.amazonaws.glue#DataQualityEncryptionMode", + "traits": { + "smithy.api#documentation": "

The encryption mode to use for encrypting Data Quality assets. These assets include data quality rulesets, results, statistics, anomaly detection models and observations.

\n

Valid values are SSEKMS for encryption using a customer-managed KMS key, or DISABLED.

" + } + }, + "KmsKeyArn": { + "target": "com.amazonaws.glue#KmsKeyArn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the KMS key to be used to encrypt the data.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Specifies how Data Quality assets in your account should be encrypted.

" + } + }, + "com.amazonaws.glue#DataQualityEncryptionMode": { + "type": "enum", + "members": { + "DISABLED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "DISABLED" + } + }, + "SSEKMS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "SSE-KMS" + } + } + } + }, "com.amazonaws.glue#DataQualityEvaluationRunAdditionalRunOptions": { "type": "structure", "members": { @@ -17152,6 +17189,12 @@ "traits": { "smithy.api#documentation": "

The encryption configuration for job bookmarks.

" } + }, + "DataQualityEncryption": { + "target": "com.amazonaws.glue#DataQualityEncryption", + "traits": { + "smithy.api#documentation": "

The encryption configuration for Glue Data Quality assets.

" + } } }, "traits": { @@ -21378,7 +21421,7 @@ } ], "traits": { - "smithy.api#documentation": "

Retrieves the metadata for a given job run. Job run history is accessible for 90 days for your workflow and job run.

" + "smithy.api#documentation": "

Retrieves the metadata for a given job run. Job run history is accessible for 365 days for your workflow and job run.

" } }, "com.amazonaws.glue#GetJobRunRequest": { @@ -21447,7 +21490,7 @@ } ], "traits": { - "smithy.api#documentation": "

Retrieves metadata for all runs of a given job definition.

", + "smithy.api#documentation": "

Retrieves metadata for all runs of a given job definition.

\n

\n GetJobRuns returns the job runs in chronological order, with the newest jobs returned first.

", "smithy.api#paginated": { "inputToken": "NextToken", "outputToken": "NextToken", @@ -27027,7 +27070,7 @@ "WorkerType": { "target": "com.amazonaws.glue#WorkerType", "traits": { - "smithy.api#documentation": "

The type of predefined worker that is allocated when a job runs. Accepts a value of\n G.1X, G.2X, G.4X, G.8X or G.025X for Spark jobs. Accepts the value Z.2X for Ray jobs.

\n
    \n
  • \n

    For the G.1X worker type, each worker maps to 1 DPU (4 vCPUs, 16 GB of memory) with 84GB disk (approximately 34GB free), and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offers a scalable and cost effective way to run most jobs.

    \n
  • \n
  • \n

    For the G.2X worker type, each worker maps to 2 DPU (8 vCPUs, 32 GB of memory) with 128GB disk (approximately 77GB free), and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offers a scalable and cost effective way to run most jobs.

    \n
  • \n
  • \n

    For the G.4X worker type, each worker maps to 4 DPU (16 vCPUs, 64 GB of memory) with 256GB disk (approximately 235GB free), and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs in the following Amazon Web Services Regions: US East (Ohio), US East (N. Virginia), US West (Oregon), Asia Pacific (Singapore), Asia Pacific (Sydney), Asia Pacific (Tokyo), Canada (Central), Europe (Frankfurt), Europe (Ireland), and Europe (Stockholm).

    \n
  • \n
  • \n

    For the G.8X worker type, each worker maps to 8 DPU (32 vCPUs, 128 GB of memory) with 512GB disk (approximately 487GB free), and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs, in the same Amazon Web Services Regions as supported for the G.4X worker type.

    \n
  • \n
  • \n

    For the G.025X worker type, each worker maps to 0.25 DPU (2 vCPUs, 4 GB of memory) with 84GB disk (approximately 34GB free), and provides 1 executor per worker. We recommend this worker type for low volume streaming jobs. This worker type is only available for Glue version 3.0 streaming jobs.

    \n
  • \n
  • \n

    For the Z.2X worker type, each worker maps to 2 M-DPU (8vCPUs, 64 GB of memory) with 128 GB disk (approximately 120GB free), and provides up to 8 Ray workers based on the autoscaler.

    \n
  • \n
" + "smithy.api#documentation": "

The type of predefined worker that is allocated when a job runs. Accepts a value of\n G.1X, G.2X, G.4X, G.8X or G.025X for Spark jobs. Accepts the value Z.2X for Ray jobs.

\n
    \n
  • \n

    For the G.1X worker type, each worker maps to 1 DPU (4 vCPUs, 16 GB of memory) with 94GB disk, and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, as it offers a scalable and cost-effective way to run most jobs.

    \n
  • \n
  • \n

    For the G.2X worker type, each worker maps to 2 DPU (8 vCPUs, 32 GB of memory) with 138GB disk, and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offers a scalable and cost effective way to run most jobs.

    \n
  • \n
  • \n

    For the G.4X worker type, each worker maps to 4 DPU (16 vCPUs, 64 GB of memory) with 256GB disk, and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs in the following Amazon Web Services Regions: US East (Ohio), US East (N. Virginia), US West (Oregon), Asia Pacific (Singapore), Asia Pacific (Sydney), Asia Pacific (Tokyo), Canada (Central), Europe (Frankfurt), Europe (Ireland), and Europe (Stockholm).

    \n
  • \n
  • \n

    For the G.8X worker type, each worker maps to 8 DPU (32 vCPUs, 128 GB of memory) with 512GB disk, and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs, in the same Amazon Web Services Regions as supported for the G.4X worker type.

    \n
  • \n
  • \n

    For the G.025X worker type, each worker maps to 0.25 DPU (2 vCPUs, 4 GB of memory) with 84GB disk, and provides 1 executor per worker. We recommend this worker type for low volume streaming jobs. This worker type is only available for Glue version 3.0 or later streaming jobs.

    \n
  • \n
  • \n

    For the Z.2X worker type, each worker maps to 2 M-DPU (8vCPUs, 64 GB of memory) with 128 GB disk, and provides up to 8 Ray workers based on the autoscaler.

    \n
  • \n
" } }, "NumberOfWorkers": { @@ -27383,7 +27426,7 @@ "WorkerType": { "target": "com.amazonaws.glue#WorkerType", "traits": { - "smithy.api#documentation": "

The type of predefined worker that is allocated when a job runs. Accepts a value of\n G.1X, G.2X, G.4X, G.8X or G.025X for Spark jobs. Accepts the value Z.2X for Ray jobs.

\n
    \n
  • \n

    For the G.1X worker type, each worker maps to 1 DPU (4 vCPUs, 16 GB of memory) with 84GB disk (approximately 34GB free), and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offers a scalable and cost effective way to run most jobs.

    \n
  • \n
  • \n

    For the G.2X worker type, each worker maps to 2 DPU (8 vCPUs, 32 GB of memory) with 128GB disk (approximately 77GB free), and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offers a scalable and cost effective way to run most jobs.

    \n
  • \n
  • \n

    For the G.4X worker type, each worker maps to 4 DPU (16 vCPUs, 64 GB of memory) with 256GB disk (approximately 235GB free), and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs in the following Amazon Web Services Regions: US East (Ohio), US East (N. Virginia), US West (Oregon), Asia Pacific (Singapore), Asia Pacific (Sydney), Asia Pacific (Tokyo), Canada (Central), Europe (Frankfurt), Europe (Ireland), and Europe (Stockholm).

    \n
  • \n
  • \n

    For the G.8X worker type, each worker maps to 8 DPU (32 vCPUs, 128 GB of memory) with 512GB disk (approximately 487GB free), and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs, in the same Amazon Web Services Regions as supported for the G.4X worker type.

    \n
  • \n
  • \n

    For the G.025X worker type, each worker maps to 0.25 DPU (2 vCPUs, 4 GB of memory) with 84GB disk (approximately 34GB free), and provides 1 executor per worker. We recommend this worker type for low volume streaming jobs. This worker type is only available for Glue version 3.0 streaming jobs.

    \n
  • \n
  • \n

    For the Z.2X worker type, each worker maps to 2 M-DPU (8vCPUs, 64 GB of memory) with 128 GB disk (approximately 120GB free), and provides up to 8 Ray workers based on the autoscaler.

    \n
  • \n
" + "smithy.api#documentation": "

The type of predefined worker that is allocated when a job runs. Accepts a value of\n G.1X, G.2X, G.4X, G.8X or G.025X for Spark jobs. Accepts the value Z.2X for Ray jobs.

\n
    \n
  • \n

    For the G.1X worker type, each worker maps to 1 DPU (4 vCPUs, 16 GB of memory) with 94GB disk, and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offers a scalable and cost effective way to run most jobs.

    \n
  • \n
  • \n

    For the G.2X worker type, each worker maps to 2 DPU (8 vCPUs, 32 GB of memory) with 138GB disk, and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offers a scalable and cost effective way to run most jobs.

    \n
  • \n
  • \n

    For the G.4X worker type, each worker maps to 4 DPU (16 vCPUs, 64 GB of memory) with 256GB disk, and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs in the following Amazon Web Services Regions: US East (Ohio), US East (N. Virginia), US West (Oregon), Asia Pacific (Singapore), Asia Pacific (Sydney), Asia Pacific (Tokyo), Canada (Central), Europe (Frankfurt), Europe (Ireland), and Europe (Stockholm).

    \n
  • \n
  • \n

    For the G.8X worker type, each worker maps to 8 DPU (32 vCPUs, 128 GB of memory) with 512GB disk, and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs, in the same Amazon Web Services Regions as supported for the G.4X worker type.

    \n
  • \n
  • \n

    For the G.025X worker type, each worker maps to 0.25 DPU (2 vCPUs, 4 GB of memory) with 84GB disk, and provides 1 executor per worker. We recommend this worker type for low volume streaming jobs. This worker type is only available for Glue version 3.0 or later streaming jobs.

    \n
  • \n
  • \n

    For the Z.2X worker type, each worker maps to 2 M-DPU (8vCPUs, 64 GB of memory) with 128 GB disk, and provides up to 8 Ray workers based on the autoscaler.

    \n
  • \n
" } }, "NumberOfWorkers": { @@ -27617,7 +27660,7 @@ "WorkerType": { "target": "com.amazonaws.glue#WorkerType", "traits": { - "smithy.api#documentation": "

The type of predefined worker that is allocated when a job runs. Accepts a value of\n G.1X, G.2X, G.4X, G.8X or G.025X for Spark jobs. Accepts the value Z.2X for Ray jobs.

\n
    \n
  • \n

    For the G.1X worker type, each worker maps to 1 DPU (4 vCPUs, 16 GB of memory) with 84GB disk (approximately 34GB free), and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offers a scalable and cost effective way to run most jobs.

    \n
  • \n
  • \n

    For the G.2X worker type, each worker maps to 2 DPU (8 vCPUs, 32 GB of memory) with 128GB disk (approximately 77GB free), and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offers a scalable and cost effective way to run most jobs.

    \n
  • \n
  • \n

    For the G.4X worker type, each worker maps to 4 DPU (16 vCPUs, 64 GB of memory) with 256GB disk (approximately 235GB free), and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs in the following Amazon Web Services Regions: US East (Ohio), US East (N. Virginia), US West (Oregon), Asia Pacific (Singapore), Asia Pacific (Sydney), Asia Pacific (Tokyo), Canada (Central), Europe (Frankfurt), Europe (Ireland), and Europe (Stockholm).

    \n
  • \n
  • \n

    For the G.8X worker type, each worker maps to 8 DPU (32 vCPUs, 128 GB of memory) with 512GB disk (approximately 487GB free), and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs, in the same Amazon Web Services Regions as supported for the G.4X worker type.

    \n
  • \n
  • \n

    For the G.025X worker type, each worker maps to 0.25 DPU (2 vCPUs, 4 GB of memory) with 84GB disk (approximately 34GB free), and provides 1 executor per worker. We recommend this worker type for low volume streaming jobs. This worker type is only available for Glue version 3.0 streaming jobs.

    \n
  • \n
  • \n

    For the Z.2X worker type, each worker maps to 2 M-DPU (8vCPUs, 64 GB of memory) with 128 GB disk (approximately 120GB free), and provides up to 8 Ray workers based on the autoscaler.

    \n
  • \n
" + "smithy.api#documentation": "

The type of predefined worker that is allocated when a job runs. Accepts a value of\n G.1X, G.2X, G.4X, G.8X or G.025X for Spark jobs. Accepts the value Z.2X for Ray jobs.

\n
    \n
  • \n

    For the G.1X worker type, each worker maps to 1 DPU (4 vCPUs, 16 GB of memory) with 94GB disk, and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offers a scalable and cost effective way to run most jobs.

    \n
  • \n
  • \n

    For the G.2X worker type, each worker maps to 2 DPU (8 vCPUs, 32 GB of memory) with 138GB disk, and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offers a scalable and cost effective way to run most jobs.

    \n
  • \n
  • \n

    For the G.4X worker type, each worker maps to 4 DPU (16 vCPUs, 64 GB of memory) with 256GB disk, and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs in the following Amazon Web Services Regions: US East (Ohio), US East (N. Virginia), US West (Oregon), Asia Pacific (Singapore), Asia Pacific (Sydney), Asia Pacific (Tokyo), Canada (Central), Europe (Frankfurt), Europe (Ireland), and Europe (Stockholm).

    \n
  • \n
  • \n

    For the G.8X worker type, each worker maps to 8 DPU (32 vCPUs, 128 GB of memory) with 512GB disk, and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs, in the same Amazon Web Services Regions as supported for the G.4X worker type.

    \n
  • \n
  • \n

    For the G.025X worker type, each worker maps to 0.25 DPU (2 vCPUs, 4 GB of memory) with 84GB disk, and provides 1 executor per worker. We recommend this worker type for low volume streaming jobs. This worker type is only available for Glue version 3.0 or later streaming jobs.

    \n
  • \n
  • \n

    For the Z.2X worker type, each worker maps to 2 M-DPU (8vCPUs, 64 GB of memory) with 128 GB disk, and provides up to 8 Ray workers based on the autoscaler.

    \n
  • \n
" } }, "NumberOfWorkers": { @@ -33645,7 +33688,7 @@ "RunProperties": { "target": "com.amazonaws.glue#WorkflowRunProperties", "traits": { - "smithy.api#documentation": "

The properties to put for the specified run.

", + "smithy.api#documentation": "

The properties to put for the specified run.

\n

Run properties may be logged. Do not pass plaintext secrets as properties. Retrieve secrets from a Glue Connection, Amazon Web Services Secrets Manager or other secret management mechanism if you intend to use them within the workflow run.

", "smithy.api#required": {} } } @@ -38494,7 +38537,7 @@ "WorkerType": { "target": "com.amazonaws.glue#WorkerType", "traits": { - "smithy.api#documentation": "

The type of predefined worker that is allocated when a job runs. Accepts a value of\n G.1X, G.2X, G.4X, G.8X or G.025X for Spark jobs. Accepts the value Z.2X for Ray jobs.

\n
    \n
  • \n

    For the G.1X worker type, each worker maps to 1 DPU (4 vCPUs, 16 GB of memory) with 84GB disk (approximately 34GB free), and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offers a scalable and cost effective way to run most jobs.

    \n
  • \n
  • \n

    For the G.2X worker type, each worker maps to 2 DPU (8 vCPUs, 32 GB of memory) with 128GB disk (approximately 77GB free), and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offers a scalable and cost effective way to run most jobs.

    \n
  • \n
  • \n

    For the G.4X worker type, each worker maps to 4 DPU (16 vCPUs, 64 GB of memory) with 256GB disk (approximately 235GB free), and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs in the following Amazon Web Services Regions: US East (Ohio), US East (N. Virginia), US West (Oregon), Asia Pacific (Singapore), Asia Pacific (Sydney), Asia Pacific (Tokyo), Canada (Central), Europe (Frankfurt), Europe (Ireland), and Europe (Stockholm).

    \n
  • \n
  • \n

    For the G.8X worker type, each worker maps to 8 DPU (32 vCPUs, 128 GB of memory) with 512GB disk (approximately 487GB free), and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs, in the same Amazon Web Services Regions as supported for the G.4X worker type.

    \n
  • \n
  • \n

    For the G.025X worker type, each worker maps to 0.25 DPU (2 vCPUs, 4 GB of memory) with 84GB disk (approximately 34GB free), and provides 1 executor per worker. We recommend this worker type for low volume streaming jobs. This worker type is only available for Glue version 3.0 streaming jobs.

    \n
  • \n
  • \n

    For the Z.2X worker type, each worker maps to 2 M-DPU (8vCPUs, 64 GB of memory) with 128 GB disk (approximately 120GB free), and provides up to 8 Ray workers based on the autoscaler.

    \n
  • \n
" + "smithy.api#documentation": "

The type of predefined worker that is allocated when a job runs. Accepts a value of\n G.1X, G.2X, G.4X, G.8X or G.025X for Spark jobs. Accepts the value Z.2X for Ray jobs.

\n
    \n
  • \n

    For the G.1X worker type, each worker maps to 1 DPU (4 vCPUs, 16 GB of memory) with 94GB disk, and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offers a scalable and cost effective way to run most jobs.

    \n
  • \n
  • \n

    For the G.2X worker type, each worker maps to 2 DPU (8 vCPUs, 32 GB of memory) with 138GB disk, and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offers a scalable and cost effective way to run most jobs.

    \n
  • \n
  • \n

    For the G.4X worker type, each worker maps to 4 DPU (16 vCPUs, 64 GB of memory) with 256GB disk, and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs in the following Amazon Web Services Regions: US East (Ohio), US East (N. Virginia), US West (Oregon), Asia Pacific (Singapore), Asia Pacific (Sydney), Asia Pacific (Tokyo), Canada (Central), Europe (Frankfurt), Europe (Ireland), and Europe (Stockholm).

    \n
  • \n
  • \n

    For the G.8X worker type, each worker maps to 8 DPU (32 vCPUs, 128 GB of memory) with 512GB disk, and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs, in the same Amazon Web Services Regions as supported for the G.4X worker type.

    \n
  • \n
  • \n

    For the G.025X worker type, each worker maps to 0.25 DPU (2 vCPUs, 4 GB of memory) with 84GB disk, and provides 1 executor per worker. We recommend this worker type for low volume streaming jobs. This worker type is only available for Glue version 3.0 or later streaming jobs.

    \n
  • \n
  • \n

    For the Z.2X worker type, each worker maps to 2 M-DPU (8vCPUs, 64 GB of memory) with 128 GB disk, and provides up to 8 Ray workers based on the autoscaler.

    \n
  • \n
" } }, "NumberOfWorkers": { @@ -38760,7 +38803,7 @@ "RunProperties": { "target": "com.amazonaws.glue#WorkflowRunProperties", "traits": { - "smithy.api#documentation": "

The workflow run properties for the new workflow run.

" + "smithy.api#documentation": "

The workflow run properties for the new workflow run.

\n

Run properties may be logged. Do not pass plaintext secrets as properties. Retrieve secrets from a Glue Connection, Amazon Web Services Secrets Manager or other secret management mechanism if you intend to use them within the workflow run.

" } } }, @@ -44096,7 +44139,7 @@ } ], "traits": { - "smithy.api#documentation": "

Updates a trigger definition.

" + "smithy.api#documentation": "

Updates a trigger definition.

\n

Job arguments may be logged. Do not pass plaintext secrets as arguments. Retrieve secrets from a Glue Connection, Amazon Web Services Secrets Manager or other secret management mechanism if you intend to keep them within the Job.

" } }, "com.amazonaws.glue#UpdateTriggerRequest": { @@ -44328,7 +44371,7 @@ "DefaultRunProperties": { "target": "com.amazonaws.glue#WorkflowRunProperties", "traits": { - "smithy.api#documentation": "

A collection of properties to be used as part of each execution of the workflow.

" + "smithy.api#documentation": "

A collection of properties to be used as part of each execution of the workflow.

\n

Run properties may be logged. Do not pass plaintext secrets as properties. Retrieve secrets from a Glue Connection, Amazon Web Services Secrets Manager or other secret management mechanism if you intend to use them within the workflow run.

" } }, "MaxConcurrentRuns": { diff --git a/models/greengrassv2.json b/models/greengrassv2.json index 3d4035e32f..6b03e5a077 100644 --- a/models/greengrassv2.json +++ b/models/greengrassv2.json @@ -958,6 +958,24 @@ "traits": { "smithy.api#documentation": "

The time at which the core device's status last updated, expressed in ISO 8601\n format.

" } + }, + "platform": { + "target": "com.amazonaws.greengrassv2#CoreDevicePlatformString", + "traits": { + "smithy.api#documentation": "

The operating system platform that the core device runs.

" + } + }, + "architecture": { + "target": "com.amazonaws.greengrassv2#CoreDeviceArchitectureString", + "traits": { + "smithy.api#documentation": "

The computer architecture of the core device.

" + } + }, + "runtime": { + "target": "com.amazonaws.greengrassv2#CoreDeviceRuntimeString", + "traits": { + "smithy.api#documentation": "

The runtime for the core device. The runtime can be:

\n
    \n
  • \n

    \n aws_nucleus_classic\n

    \n
  • \n
  • \n

    \n aws_nucleus_lite\n

    \n
  • \n
" + } } }, "traits": { @@ -982,6 +1000,15 @@ } } }, + "com.amazonaws.greengrassv2#CoreDeviceRuntimeString": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 255 + } + } + }, "com.amazonaws.greengrassv2#CoreDeviceStatus": { "type": "enum", "members": { @@ -2425,6 +2452,12 @@ "smithy.api#documentation": "

The computer architecture of the core device.

" } }, + "runtime": { + "target": "com.amazonaws.greengrassv2#CoreDeviceRuntimeString", + "traits": { + "smithy.api#documentation": "

The runtime for the core device. The runtime can be:

\n
    \n
  • \n

    \n aws_nucleus_classic\n

    \n
  • \n
  • \n

    \n aws_nucleus_lite\n

    \n
  • \n
" + } + }, "status": { "target": "com.amazonaws.greengrassv2#CoreDeviceStatus", "traits": { @@ -4828,7 +4861,7 @@ } ], "traits": { - "smithy.api#documentation": "

Retrieves a paginated list of Greengrass core devices.

\n \n

IoT Greengrass relies on individual devices to send status updates to the Amazon Web Services Cloud. If the\n IoT Greengrass Core software isn't running on the device, or if device isn't connected to the Amazon Web Services Cloud,\n then the reported status of that device might not reflect its current status. The status\n timestamp indicates when the device status was last updated.

\n

Core devices send status updates at the following times:

\n
    \n
  • \n

    When the IoT Greengrass Core software starts

    \n
  • \n
  • \n

    When the core device receives a deployment from the Amazon Web Services Cloud

    \n
  • \n
  • \n

    When the status of any component on the core device becomes\n BROKEN\n

    \n
  • \n
  • \n

    At a regular interval that you can configure, which defaults to 24 hours

    \n
  • \n
  • \n

    For IoT Greengrass Core v2.7.0, the core device sends status updates upon local deployment and\n cloud deployment

    \n
  • \n
\n
", + "smithy.api#documentation": "

Retrieves a paginated list of Greengrass core devices.

\n \n

IoT Greengrass relies on individual devices to send status updates to the Amazon Web Services Cloud. If the\n IoT Greengrass Core software isn't running on the device, or if device isn't connected to the Amazon Web Services Cloud,\n then the reported status of that device might not reflect its current status. The status\n timestamp indicates when the device status was last updated.

\n

Core devices send status updates at the following times:

\n
    \n
  • \n

    When the IoT Greengrass Core software starts

    \n
  • \n
  • \n

    When the core device receives a deployment from the Amazon Web Services Cloud

    \n
  • \n
  • \n

    For Greengrass nucleus 2.12.2 and earlier, the core device sends status updates when the\n status of any component on the core device becomes ERRORED or\n BROKEN.

    \n
  • \n
  • \n

    For Greengrass nucleus 2.12.3 and later, the core device sends status updates when the\n status of any component on the core device becomes ERRORED,\n BROKEN, RUNNING, or FINISHED.

    \n
  • \n
  • \n

    At a regular interval that you can configure, which defaults to 24 hours

    \n
  • \n
  • \n

    For IoT Greengrass Core v2.7.0, the core device sends status updates upon local deployment and\n cloud deployment

    \n
  • \n
\n
", "smithy.api#http": { "method": "GET", "uri": "/greengrass/v2/coreDevices", @@ -4872,6 +4905,13 @@ "smithy.api#documentation": "

The token to be used for the next set of paginated results.

", "smithy.api#httpQuery": "nextToken" } + }, + "runtime": { + "target": "com.amazonaws.greengrassv2#CoreDeviceRuntimeString", + "traits": { + "smithy.api#documentation": "

The runtime to be used by the core device. The runtime can be:

\n
    \n
  • \n

    \n aws_nucleus_classic\n

    \n
  • \n
  • \n

    \n aws_nucleus_lite\n

    \n
  • \n
", + "smithy.api#httpQuery": "runtime" + } } }, "traits": { diff --git a/models/guardduty.json b/models/guardduty.json index 1292bbc67d..615a4135bf 100644 --- a/models/guardduty.json +++ b/models/guardduty.json @@ -2090,7 +2090,7 @@ "target": "com.amazonaws.guardduty#FindingCriteria", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

Represents the criteria to be used in the filter for querying findings.

\n

You can only use the following attributes to query findings:

\n
    \n
  • \n

    accountId

    \n
  • \n
  • \n

    id

    \n
  • \n
  • \n

    region

    \n
  • \n
  • \n

    severity

    \n

    To filter on the basis of severity, the API and CLI use the following input list for\n the FindingCriteria\n condition:

    \n
      \n
    • \n

      \n Low: [\"1\", \"2\", \"3\"]\n

      \n
    • \n
    • \n

      \n Medium: [\"4\", \"5\", \"6\"]\n

      \n
    • \n
    • \n

      \n High: [\"7\", \"8\", \"9\"]\n

      \n
    • \n
    \n

    For more information, see Severity\n levels for GuardDuty findings.

    \n
  • \n
  • \n

    type

    \n
  • \n
  • \n

    updatedAt

    \n

    Type: ISO 8601 string format: YYYY-MM-DDTHH:MM:SS.SSSZ or YYYY-MM-DDTHH:MM:SSZ\n depending on whether the value contains milliseconds.

    \n
  • \n
  • \n

    resource.accessKeyDetails.accessKeyId

    \n
  • \n
  • \n

    resource.accessKeyDetails.principalId

    \n
  • \n
  • \n

    resource.accessKeyDetails.userName

    \n
  • \n
  • \n

    resource.accessKeyDetails.userType

    \n
  • \n
  • \n

    resource.instanceDetails.iamInstanceProfile.id

    \n
  • \n
  • \n

    resource.instanceDetails.imageId

    \n
  • \n
  • \n

    resource.instanceDetails.instanceId

    \n
  • \n
  • \n

    resource.instanceDetails.tags.key

    \n
  • \n
  • \n

    resource.instanceDetails.tags.value

    \n
  • \n
  • \n

    resource.instanceDetails.networkInterfaces.ipv6Addresses

    \n
  • \n
  • \n

    resource.instanceDetails.networkInterfaces.privateIpAddresses.privateIpAddress

    \n
  • \n
  • \n

    resource.instanceDetails.networkInterfaces.publicDnsName

    \n
  • \n
  • \n

    resource.instanceDetails.networkInterfaces.publicIp

    \n
  • \n
  • \n

    resource.instanceDetails.networkInterfaces.securityGroups.groupId

    \n
  • \n
  • \n

    resource.instanceDetails.networkInterfaces.securityGroups.groupName

    \n
  • \n
  • \n

    resource.instanceDetails.networkInterfaces.subnetId

    \n
  • \n
  • \n

    resource.instanceDetails.networkInterfaces.vpcId

    \n
  • \n
  • \n

    resource.instanceDetails.outpostArn

    \n
  • \n
  • \n

    resource.resourceType

    \n
  • \n
  • \n

    resource.s3BucketDetails.publicAccess.effectivePermissions

    \n
  • \n
  • \n

    resource.s3BucketDetails.name

    \n
  • \n
  • \n

    resource.s3BucketDetails.tags.key

    \n
  • \n
  • \n

    resource.s3BucketDetails.tags.value

    \n
  • \n
  • \n

    resource.s3BucketDetails.type

    \n
  • \n
  • \n

    service.action.actionType

    \n
  • \n
  • \n

    service.action.awsApiCallAction.api

    \n
  • \n
  • \n

    service.action.awsApiCallAction.callerType

    \n
  • \n
  • \n

    service.action.awsApiCallAction.errorCode

    \n
  • \n
  • \n

    service.action.awsApiCallAction.remoteIpDetails.city.cityName

    \n
  • \n
  • \n

    service.action.awsApiCallAction.remoteIpDetails.country.countryName

    \n
  • \n
  • \n

    service.action.awsApiCallAction.remoteIpDetails.ipAddressV4

    \n
  • \n
  • \n

    service.action.awsApiCallAction.remoteIpDetails.ipAddressV6

    \n
  • \n
  • \n

    service.action.awsApiCallAction.remoteIpDetails.organization.asn

    \n
  • \n
  • \n

    service.action.awsApiCallAction.remoteIpDetails.organization.asnOrg

    \n
  • \n
  • \n

    service.action.awsApiCallAction.serviceName

    \n
  • \n
  • \n

    service.action.dnsRequestAction.domain

    \n
  • \n
  • \n

    service.action.dnsRequestAction.domainWithSuffix

    \n
  • \n
  • \n

    service.action.networkConnectionAction.blocked

    \n
  • \n
  • \n

    service.action.networkConnectionAction.connectionDirection

    \n
  • \n
  • \n

    service.action.networkConnectionAction.localPortDetails.port

    \n
  • \n
  • \n

    service.action.networkConnectionAction.protocol

    \n
  • \n
  • \n

    service.action.networkConnectionAction.remoteIpDetails.city.cityName

    \n
  • \n
  • \n

    service.action.networkConnectionAction.remoteIpDetails.country.countryName

    \n
  • \n
  • \n

    service.action.networkConnectionAction.remoteIpDetails.ipAddressV4

    \n
  • \n
  • \n

    service.action.networkConnectionAction.remoteIpDetails.ipAddressV6

    \n
  • \n
  • \n

    service.action.networkConnectionAction.remoteIpDetails.organization.asn

    \n
  • \n
  • \n

    service.action.networkConnectionAction.remoteIpDetails.organization.asnOrg

    \n
  • \n
  • \n

    service.action.networkConnectionAction.remotePortDetails.port

    \n
  • \n
  • \n

    service.action.awsApiCallAction.remoteAccountDetails.affiliated

    \n
  • \n
  • \n

    service.action.kubernetesApiCallAction.remoteIpDetails.ipAddressV4

    \n
  • \n
  • \n

    service.action.kubernetesApiCallAction.remoteIpDetails.ipAddressV6

    \n
  • \n
  • \n

    service.action.kubernetesApiCallAction.namespace

    \n
  • \n
  • \n

    service.action.kubernetesApiCallAction.remoteIpDetails.organization.asn

    \n
  • \n
  • \n

    service.action.kubernetesApiCallAction.requestUri

    \n
  • \n
  • \n

    service.action.kubernetesApiCallAction.statusCode

    \n
  • \n
  • \n

    service.action.networkConnectionAction.localIpDetails.ipAddressV4

    \n
  • \n
  • \n

    service.action.networkConnectionAction.localIpDetails.ipAddressV6

    \n
  • \n
  • \n

    service.action.networkConnectionAction.protocol

    \n
  • \n
  • \n

    service.action.awsApiCallAction.serviceName

    \n
  • \n
  • \n

    service.action.awsApiCallAction.remoteAccountDetails.accountId

    \n
  • \n
  • \n

    service.additionalInfo.threatListName

    \n
  • \n
  • \n

    service.resourceRole

    \n
  • \n
  • \n

    resource.eksClusterDetails.name

    \n
  • \n
  • \n

    resource.kubernetesDetails.kubernetesWorkloadDetails.name

    \n
  • \n
  • \n

    resource.kubernetesDetails.kubernetesWorkloadDetails.namespace

    \n
  • \n
  • \n

    resource.kubernetesDetails.kubernetesUserDetails.username

    \n
  • \n
  • \n

    resource.kubernetesDetails.kubernetesWorkloadDetails.containers.image

    \n
  • \n
  • \n

    resource.kubernetesDetails.kubernetesWorkloadDetails.containers.imagePrefix

    \n
  • \n
  • \n

    service.ebsVolumeScanDetails.scanId

    \n
  • \n
  • \n

    service.ebsVolumeScanDetails.scanDetections.threatDetectedByName.threatNames.name

    \n
  • \n
  • \n

    service.ebsVolumeScanDetails.scanDetections.threatDetectedByName.threatNames.severity

    \n
  • \n
  • \n

    service.ebsVolumeScanDetails.scanDetections.threatDetectedByName.threatNames.filePaths.hash

    \n
  • \n
  • \n

    resource.ecsClusterDetails.name

    \n
  • \n
  • \n

    resource.ecsClusterDetails.taskDetails.containers.image

    \n
  • \n
  • \n

    resource.ecsClusterDetails.taskDetails.definitionArn

    \n
  • \n
  • \n

    resource.containerDetails.image

    \n
  • \n
  • \n

    resource.rdsDbInstanceDetails.dbInstanceIdentifier

    \n
  • \n
  • \n

    resource.rdsDbInstanceDetails.dbClusterIdentifier

    \n
  • \n
  • \n

    resource.rdsDbInstanceDetails.engine

    \n
  • \n
  • \n

    resource.rdsDbUserDetails.user

    \n
  • \n
  • \n

    resource.rdsDbInstanceDetails.tags.key

    \n
  • \n
  • \n

    resource.rdsDbInstanceDetails.tags.value

    \n
  • \n
  • \n

    service.runtimeDetails.process.executableSha256

    \n
  • \n
  • \n

    service.runtimeDetails.process.name

    \n
  • \n
  • \n

    service.runtimeDetails.process.name

    \n
  • \n
  • \n

    resource.lambdaDetails.functionName

    \n
  • \n
  • \n

    resource.lambdaDetails.functionArn

    \n
  • \n
  • \n

    resource.lambdaDetails.tags.key

    \n
  • \n
  • \n

    resource.lambdaDetails.tags.value

    \n
  • \n
", + "smithy.api#documentation": "

Represents the criteria to be used in the filter for querying findings.

\n

You can only use the following attributes to query findings:

\n
    \n
  • \n

    accountId

    \n
  • \n
  • \n

    id

    \n
  • \n
  • \n

    region

    \n
  • \n
  • \n

    severity

    \n

    To filter on the basis of severity, the API and CLI use the following input list for\n the FindingCriteria\n condition:

    \n
      \n
    • \n

      \n Low: [\"1\", \"2\", \"3\"]\n

      \n
    • \n
    • \n

      \n Medium: [\"4\", \"5\", \"6\"]\n

      \n
    • \n
    • \n

      \n High: [\"7\", \"8\"]\n

      \n
    • \n
    • \n

      \n Critical: [\"9\", \"10\"]\n

      \n
    • \n
    \n

    For more information, see Findings severity levels\n in the Amazon GuardDuty User Guide.

    \n
  • \n
  • \n

    type

    \n
  • \n
  • \n

    updatedAt

    \n

    Type: ISO 8601 string format: YYYY-MM-DDTHH:MM:SS.SSSZ or YYYY-MM-DDTHH:MM:SSZ\n depending on whether the value contains milliseconds.

    \n
  • \n
  • \n

    resource.accessKeyDetails.accessKeyId

    \n
  • \n
  • \n

    resource.accessKeyDetails.principalId

    \n
  • \n
  • \n

    resource.accessKeyDetails.userName

    \n
  • \n
  • \n

    resource.accessKeyDetails.userType

    \n
  • \n
  • \n

    resource.instanceDetails.iamInstanceProfile.id

    \n
  • \n
  • \n

    resource.instanceDetails.imageId

    \n
  • \n
  • \n

    resource.instanceDetails.instanceId

    \n
  • \n
  • \n

    resource.instanceDetails.tags.key

    \n
  • \n
  • \n

    resource.instanceDetails.tags.value

    \n
  • \n
  • \n

    resource.instanceDetails.networkInterfaces.ipv6Addresses

    \n
  • \n
  • \n

    resource.instanceDetails.networkInterfaces.privateIpAddresses.privateIpAddress

    \n
  • \n
  • \n

    resource.instanceDetails.networkInterfaces.publicDnsName

    \n
  • \n
  • \n

    resource.instanceDetails.networkInterfaces.publicIp

    \n
  • \n
  • \n

    resource.instanceDetails.networkInterfaces.securityGroups.groupId

    \n
  • \n
  • \n

    resource.instanceDetails.networkInterfaces.securityGroups.groupName

    \n
  • \n
  • \n

    resource.instanceDetails.networkInterfaces.subnetId

    \n
  • \n
  • \n

    resource.instanceDetails.networkInterfaces.vpcId

    \n
  • \n
  • \n

    resource.instanceDetails.outpostArn

    \n
  • \n
  • \n

    resource.resourceType

    \n
  • \n
  • \n

    resource.s3BucketDetails.publicAccess.effectivePermissions

    \n
  • \n
  • \n

    resource.s3BucketDetails.name

    \n
  • \n
  • \n

    resource.s3BucketDetails.tags.key

    \n
  • \n
  • \n

    resource.s3BucketDetails.tags.value

    \n
  • \n
  • \n

    resource.s3BucketDetails.type

    \n
  • \n
  • \n

    service.action.actionType

    \n
  • \n
  • \n

    service.action.awsApiCallAction.api

    \n
  • \n
  • \n

    service.action.awsApiCallAction.callerType

    \n
  • \n
  • \n

    service.action.awsApiCallAction.errorCode

    \n
  • \n
  • \n

    service.action.awsApiCallAction.remoteIpDetails.city.cityName

    \n
  • \n
  • \n

    service.action.awsApiCallAction.remoteIpDetails.country.countryName

    \n
  • \n
  • \n

    service.action.awsApiCallAction.remoteIpDetails.ipAddressV4

    \n
  • \n
  • \n

    service.action.awsApiCallAction.remoteIpDetails.ipAddressV6

    \n
  • \n
  • \n

    service.action.awsApiCallAction.remoteIpDetails.organization.asn

    \n
  • \n
  • \n

    service.action.awsApiCallAction.remoteIpDetails.organization.asnOrg

    \n
  • \n
  • \n

    service.action.awsApiCallAction.serviceName

    \n
  • \n
  • \n

    service.action.dnsRequestAction.domain

    \n
  • \n
  • \n

    service.action.dnsRequestAction.domainWithSuffix

    \n
  • \n
  • \n

    service.action.networkConnectionAction.blocked

    \n
  • \n
  • \n

    service.action.networkConnectionAction.connectionDirection

    \n
  • \n
  • \n

    service.action.networkConnectionAction.localPortDetails.port

    \n
  • \n
  • \n

    service.action.networkConnectionAction.protocol

    \n
  • \n
  • \n

    service.action.networkConnectionAction.remoteIpDetails.city.cityName

    \n
  • \n
  • \n

    service.action.networkConnectionAction.remoteIpDetails.country.countryName

    \n
  • \n
  • \n

    service.action.networkConnectionAction.remoteIpDetails.ipAddressV4

    \n
  • \n
  • \n

    service.action.networkConnectionAction.remoteIpDetails.ipAddressV6

    \n
  • \n
  • \n

    service.action.networkConnectionAction.remoteIpDetails.organization.asn

    \n
  • \n
  • \n

    service.action.networkConnectionAction.remoteIpDetails.organization.asnOrg

    \n
  • \n
  • \n

    service.action.networkConnectionAction.remotePortDetails.port

    \n
  • \n
  • \n

    service.action.awsApiCallAction.remoteAccountDetails.affiliated

    \n
  • \n
  • \n

    service.action.kubernetesApiCallAction.remoteIpDetails.ipAddressV4

    \n
  • \n
  • \n

    service.action.kubernetesApiCallAction.remoteIpDetails.ipAddressV6

    \n
  • \n
  • \n

    service.action.kubernetesApiCallAction.namespace

    \n
  • \n
  • \n

    service.action.kubernetesApiCallAction.remoteIpDetails.organization.asn

    \n
  • \n
  • \n

    service.action.kubernetesApiCallAction.requestUri

    \n
  • \n
  • \n

    service.action.kubernetesApiCallAction.statusCode

    \n
  • \n
  • \n

    service.action.networkConnectionAction.localIpDetails.ipAddressV4

    \n
  • \n
  • \n

    service.action.networkConnectionAction.localIpDetails.ipAddressV6

    \n
  • \n
  • \n

    service.action.networkConnectionAction.protocol

    \n
  • \n
  • \n

    service.action.awsApiCallAction.serviceName

    \n
  • \n
  • \n

    service.action.awsApiCallAction.remoteAccountDetails.accountId

    \n
  • \n
  • \n

    service.additionalInfo.threatListName

    \n
  • \n
  • \n

    service.resourceRole

    \n
  • \n
  • \n

    resource.eksClusterDetails.name

    \n
  • \n
  • \n

    resource.kubernetesDetails.kubernetesWorkloadDetails.name

    \n
  • \n
  • \n

    resource.kubernetesDetails.kubernetesWorkloadDetails.namespace

    \n
  • \n
  • \n

    resource.kubernetesDetails.kubernetesUserDetails.username

    \n
  • \n
  • \n

    resource.kubernetesDetails.kubernetesWorkloadDetails.containers.image

    \n
  • \n
  • \n

    resource.kubernetesDetails.kubernetesWorkloadDetails.containers.imagePrefix

    \n
  • \n
  • \n

    service.ebsVolumeScanDetails.scanId

    \n
  • \n
  • \n

    service.ebsVolumeScanDetails.scanDetections.threatDetectedByName.threatNames.name

    \n
  • \n
  • \n

    service.ebsVolumeScanDetails.scanDetections.threatDetectedByName.threatNames.severity

    \n
  • \n
  • \n

    service.ebsVolumeScanDetails.scanDetections.threatDetectedByName.threatNames.filePaths.hash

    \n
  • \n
  • \n

    resource.ecsClusterDetails.name

    \n
  • \n
  • \n

    resource.ecsClusterDetails.taskDetails.containers.image

    \n
  • \n
  • \n

    resource.ecsClusterDetails.taskDetails.definitionArn

    \n
  • \n
  • \n

    resource.containerDetails.image

    \n
  • \n
  • \n

    resource.rdsDbInstanceDetails.dbInstanceIdentifier

    \n
  • \n
  • \n

    resource.rdsDbInstanceDetails.dbClusterIdentifier

    \n
  • \n
  • \n

    resource.rdsDbInstanceDetails.engine

    \n
  • \n
  • \n

    resource.rdsDbUserDetails.user

    \n
  • \n
  • \n

    resource.rdsDbInstanceDetails.tags.key

    \n
  • \n
  • \n

    resource.rdsDbInstanceDetails.tags.value

    \n
  • \n
  • \n

    service.runtimeDetails.process.executableSha256

    \n
  • \n
  • \n

    service.runtimeDetails.process.name

    \n
  • \n
  • \n

    service.runtimeDetails.process.name

    \n
  • \n
  • \n

    resource.lambdaDetails.functionName

    \n
  • \n
  • \n

    resource.lambdaDetails.functionArn

    \n
  • \n
  • \n

    resource.lambdaDetails.tags.key

    \n
  • \n
  • \n

    resource.lambdaDetails.tags.value

    \n
  • \n
", "smithy.api#jsonName": "findingCriteria", "smithy.api#required": {} } @@ -3643,7 +3643,7 @@ "target": "com.amazonaws.guardduty#Scans", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

Contains information about malware scans.

", + "smithy.api#documentation": "

Contains information about malware scans associated with GuardDuty Malware Protection for EC2.

", "smithy.api#jsonName": "scans", "smithy.api#required": {} } @@ -11969,7 +11969,7 @@ "Name": { "target": "com.amazonaws.guardduty#OrgFeatureAdditionalConfiguration", "traits": { - "smithy.api#documentation": "

The name of the additional configuration that will be configured for the\n organization.

", + "smithy.api#documentation": "

The name of the additional configuration that will be configured for the\n organization. These values are applicable to only Runtime Monitoring protection plan.

", "smithy.api#jsonName": "name" } }, @@ -11982,7 +11982,7 @@ } }, "traits": { - "smithy.api#documentation": "

A list of additional configurations which will be configured for the organization.

" + "smithy.api#documentation": "

A list of additional configurations which will be configured for the organization.

\n

Additional configuration applies to only GuardDuty Runtime Monitoring protection plan.

" } }, "com.amazonaws.guardduty#OrganizationAdditionalConfigurationResult": { @@ -11991,7 +11991,7 @@ "Name": { "target": "com.amazonaws.guardduty#OrgFeatureAdditionalConfiguration", "traits": { - "smithy.api#documentation": "

The name of the additional configuration that is configured for the member accounts within\n the organization.

", + "smithy.api#documentation": "

The name of the additional configuration that is configured for the member accounts within\n the organization. These values are applicable to only Runtime Monitoring protection plan.

", "smithy.api#jsonName": "name" } }, @@ -14015,7 +14015,7 @@ "DetectorId": { "target": "com.amazonaws.guardduty#DetectorId", "traits": { - "smithy.api#documentation": "

The unique ID of the detector that the request is associated with.

\n

To find the detectorId in the current Region, see the\nSettings page in the GuardDuty console, or run the ListDetectors API.

", + "smithy.api#documentation": "

The unique ID of the detector that is associated with the request.

\n

To find the detectorId in the current Region, see the\nSettings page in the GuardDuty console, or run the ListDetectors API.

", "smithy.api#jsonName": "detectorId" } }, @@ -14043,7 +14043,7 @@ "FailureReason": { "target": "com.amazonaws.guardduty#NonEmptyString", "traits": { - "smithy.api#documentation": "

Represents the reason for FAILED scan status.

", + "smithy.api#documentation": "

Represents the reason for FAILED scan status.

", "smithy.api#jsonName": "failureReason" } }, @@ -14119,7 +14119,7 @@ } }, "traits": { - "smithy.api#documentation": "

Contains information about a malware scan.

" + "smithy.api#documentation": "

Contains information about malware scans associated with GuardDuty Malware Protection for EC2.

" } }, "com.amazonaws.guardduty#ScanCondition": { @@ -16396,7 +16396,7 @@ "smithy.api#deprecated": { "message": "This field is deprecated, use AutoEnableOrganizationMembers instead" }, - "smithy.api#documentation": "

Represents whether or not to automatically enable member accounts in the organization.

\n

Even though this is still supported, we recommend using\n AutoEnableOrganizationMembers to achieve the similar results. You must provide a \n value for either autoEnableOrganizationMembers or autoEnable.

", + "smithy.api#documentation": "

Represents whether to automatically enable member accounts in the organization. This\n applies to only new member accounts, not the existing member accounts. When a new account joins the organization,\n the chosen features will be enabled for them by default.

\n

Even though this is still supported, we recommend using\n AutoEnableOrganizationMembers to achieve the similar results. You must provide a \n value for either autoEnableOrganizationMembers or autoEnable.

", "smithy.api#jsonName": "autoEnable" } }, diff --git a/models/ivs-realtime.json b/models/ivs-realtime.json index 4f32146ef9..d8fed37e16 100644 --- a/models/ivs-realtime.json +++ b/models/ivs-realtime.json @@ -854,6 +854,12 @@ "traits": { "smithy.api#documentation": "

Types of media to be recorded. Default: AUDIO_VIDEO.

" } + }, + "thumbnailConfiguration": { + "target": "com.amazonaws.ivsrealtime#ParticipantThumbnailConfiguration", + "traits": { + "smithy.api#documentation": "

A complex type that allows you to enable/disable the recording of thumbnails for individual participant recording\n and modify the interval at which thumbnails are generated for the live session.

" + } } }, "traits": { @@ -1084,6 +1090,38 @@ "target": "com.amazonaws.ivsrealtime#CompositionSummary" } }, + "com.amazonaws.ivsrealtime#CompositionThumbnailConfiguration": { + "type": "structure", + "members": { + "targetIntervalSeconds": { + "target": "com.amazonaws.ivsrealtime#ThumbnailIntervalSeconds", + "traits": { + "smithy.api#documentation": "

The targeted thumbnail-generation interval in seconds. Default: 60.

" + } + }, + "storage": { + "target": "com.amazonaws.ivsrealtime#ThumbnailStorageTypeList", + "traits": { + "smithy.api#documentation": "

Indicates the format in which thumbnails are recorded. SEQUENTIAL records all generated thumbnails\n in a serial manner, to the media/thumbnails/(width)x(height) directory, where (width) and (height) are the width\n\t and height of the thumbnail. LATEST saves the latest thumbnail in\n\t media/latest_thumbnail/(width)x(height)/thumb.jpg and overwrites it at the interval specified by\n\t targetIntervalSeconds. You can enable both SEQUENTIAL and LATEST.\n\t Default: SEQUENTIAL.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

An object representing a configuration of thumbnails for recorded video for a Composition.

" + } + }, + "com.amazonaws.ivsrealtime#CompositionThumbnailConfigurationList": { + "type": "list", + "member": { + "target": "com.amazonaws.ivsrealtime#CompositionThumbnailConfiguration" + }, + "traits": { + "smithy.api#length": { + "min": 0, + "max": 1 + } + } + }, "com.amazonaws.ivsrealtime#ConflictException": { "type": "structure", "members": { @@ -4318,18 +4356,26 @@ } }, "com.amazonaws.ivsrealtime#ParticipantRecordingMediaType": { - "type": "string", - "traits": { - "smithy.api#enum": [ - { - "value": "AUDIO_VIDEO", - "name": "AUDIO_VIDEO" - }, - { - "value": "AUDIO_ONLY", - "name": "AUDIO_ONLY" + "type": "enum", + "members": { + "AUDIO_VIDEO": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "AUDIO_VIDEO" } - ] + }, + "AUDIO_ONLY": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "AUDIO_ONLY" + } + }, + "NONE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "NONE" + } + } } }, "com.amazonaws.ivsrealtime#ParticipantRecordingMediaTypeList": { @@ -4455,6 +4501,32 @@ "smithy.api#documentation": "

Summary object describing a participant that has joined a stage.

" } }, + "com.amazonaws.ivsrealtime#ParticipantThumbnailConfiguration": { + "type": "structure", + "members": { + "targetIntervalSeconds": { + "target": "com.amazonaws.ivsrealtime#ThumbnailIntervalSeconds", + "traits": { + "smithy.api#documentation": "

The targeted thumbnail-generation interval in seconds. This is configurable only if\n recordingMode is INTERVAL. Default: 60.

" + } + }, + "storage": { + "target": "com.amazonaws.ivsrealtime#ThumbnailStorageTypeList", + "traits": { + "smithy.api#documentation": "

Indicates the format in which thumbnails are recorded. SEQUENTIAL records all generated thumbnails\n in a serial manner, to the media/thumbnails/high directory. LATEST saves the latest thumbnail\n\t in media/latest_thumbnail/high/thumb.jpg and overwrites it at the interval specified by\n\t targetIntervalSeconds. You can enable both SEQUENTIAL and LATEST.\n\t Default: SEQUENTIAL.

" + } + }, + "recordingMode": { + "target": "com.amazonaws.ivsrealtime#ThumbnailRecordingMode", + "traits": { + "smithy.api#documentation": "

Thumbnail recording mode. Default: DISABLED.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

An object representing a configuration of thumbnails for recorded video from an individual participant.

" + } + }, "com.amazonaws.ivsrealtime#ParticipantToken": { "type": "structure", "members": { @@ -4970,6 +5042,12 @@ "traits": { "smithy.api#documentation": "

Array of maps, each of the form string:string (key:value). \n\t This is an optional customer specification, currently used only to specify \n\t the recording format for storing a recording in Amazon S3.

" } + }, + "thumbnailConfigurations": { + "target": "com.amazonaws.ivsrealtime#CompositionThumbnailConfigurationList", + "traits": { + "smithy.api#documentation": "

A complex type that allows you to enable/disable the recording of thumbnails for a Composition\n and modify the interval at which thumbnails are generated for the live session.

" + } } }, "traits": { @@ -5584,6 +5662,62 @@ } } }, + "com.amazonaws.ivsrealtime#ThumbnailIntervalSeconds": { + "type": "integer", + "traits": { + "smithy.api#range": { + "min": 1, + "max": 86400 + } + } + }, + "com.amazonaws.ivsrealtime#ThumbnailRecordingMode": { + "type": "enum", + "members": { + "INTERVAL": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "INTERVAL" + } + }, + "DISABLED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "DISABLED" + } + } + } + }, + "com.amazonaws.ivsrealtime#ThumbnailStorageType": { + "type": "enum", + "members": { + "SEQUENTIAL": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "SEQUENTIAL" + } + }, + "LATEST": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "LATEST" + } + } + } + }, + "com.amazonaws.ivsrealtime#ThumbnailStorageTypeList": { + "type": "list", + "member": { + "target": "com.amazonaws.ivsrealtime#ThumbnailStorageType" + }, + "traits": { + "smithy.api#length": { + "min": 0, + "max": 2 + }, + "smithy.api#uniqueItems": {} + } + }, "com.amazonaws.ivsrealtime#Time": { "type": "timestamp", "traits": { diff --git a/models/keyspaces.json b/models/keyspaces.json index b35628be07..578603ca15 100644 --- a/models/keyspaces.json +++ b/models/keyspaces.json @@ -3584,7 +3584,7 @@ } ], "traits": { - "smithy.api#documentation": "

\n Adds a new Amazon Web Services Region to the keyspace. You can add a new Region to a keyspace that is either a single or a multi-Region keyspace.\n The new replica Region is applied to all tables in the keyspace. For more information, see Add an Amazon Web Services Region to a keyspace in Amazon Keyspaces in the Amazon Keyspaces Developer\n Guide.\n

\n

To change a single-Region to a multi-Region keyspace, you have to enable client-side timestamps\n for all tables in the keyspace. For more information, see\n Client-side timestamps in Amazon Keyspaces in the Amazon Keyspaces Developer\n Guide.

" + "smithy.api#documentation": "

\n Adds a new Amazon Web Services Region to the keyspace. You can add a new Region to a keyspace that is either a single or a multi-Region keyspace.\n Amazon Keyspaces is going to replicate all tables in the keyspace to the new Region. To successfully replicate all tables to the new Region, they\n must use client-side timestamps for conflict resolution. To enable client-side timestamps, specify clientSideTimestamps.status = enabled\n when invoking the API. For more information about client-side timestamps, see\n Client-side timestamps in Amazon Keyspaces in the Amazon Keyspaces Developer\n Guide.

\n

To add a Region to a keyspace using the UpdateKeyspace API, the IAM principal needs permissions for the following IAM actions:

\n
    \n
  • \n

    \n cassandra:Alter\n

    \n
  • \n
  • \n

    \n cassandra:AlterMultiRegionResource\n

    \n
  • \n
  • \n

    \n cassandra:Create\n

    \n
  • \n
  • \n

    \n cassandra:CreateMultiRegionResource\n

    \n
  • \n
  • \n

    \n cassandra:Select\n

    \n
  • \n
  • \n

    \n cassandra:SelectMultiRegionResource\n

    \n
  • \n
  • \n

    \n cassandra:Modify\n

    \n
  • \n
  • \n

    \n cassandra:ModifyMultiRegionResource\n

    \n
  • \n
\n

If the keyspace contains a table that is configured in provisioned mode with auto scaling enabled, \n the following additional IAM actions need to be allowed.

\n
    \n
  • \n

    \n application-autoscaling:RegisterScalableTarget\n

    \n
  • \n
  • \n

    \n application-autoscaling:DeregisterScalableTarget\n

    \n
  • \n
  • \n

    \n application-autoscaling:DescribeScalableTargets\n

    \n
  • \n
  • \n

    \n application-autoscaling:PutScalingPolicy\n

    \n
  • \n
  • \n

    \n application-autoscaling:DescribeScalingPolicies\n

    \n
  • \n
\n

To use the UpdateKeyspace API, the IAM principal also needs permissions to\n create a service-linked role with the following elements:

\n
    \n
  • \n

    \n iam:CreateServiceLinkedRole - The action the principal can perform.

    \n
  • \n
  • \n

    \n arn:aws:iam::*:role/aws-service-role/replication.cassandra.amazonaws.com/AWSServiceRoleForKeyspacesReplication\n - The resource that the action can be\n performed on.

    \n
  • \n
  • \n

    \n iam:AWSServiceName: replication.cassandra.amazonaws.com\n - The only Amazon Web Services service that this role can be attached to is Amazon Keyspaces.

    \n
  • \n
\n

For more information, see Configure the IAM permissions\n required to add an Amazon Web Services Region to a keyspace\n in the Amazon Keyspaces Developer Guide.

" } }, "com.amazonaws.keyspaces#UpdateKeyspaceRequest": { diff --git a/models/m2.json b/models/m2.json index 3a9fc2f4fc..f308820295 100644 --- a/models/m2.json +++ b/models/m2.json @@ -1974,6 +1974,12 @@ "smithy.api#documentation": "

Configures the maintenance window that you want for the runtime environment. The maintenance window must have the format ddd:hh24:mi-ddd:hh24:mi and must be less than 24 hours. The following two examples are valid maintenance windows: sun:23:45-mon:00:15 or sat:01:00-sat:03:00.

\n

If you do not provide a value, a random system-generated value will be assigned.

" } }, + "networkType": { + "target": "com.amazonaws.m2#NetworkType", + "traits": { + "smithy.api#documentation": "

The network type required for the runtime environment.

" + } + }, "clientToken": { "target": "com.amazonaws.m2#ClientToken", "traits": { @@ -2853,6 +2859,12 @@ "smithy.api#documentation": "

The timestamp when the runtime environment was created.

", "smithy.api#required": {} } + }, + "networkType": { + "target": "com.amazonaws.m2#NetworkType", + "traits": { + "smithy.api#documentation": "

The network type supported by the runtime environment.

" + } } }, "traits": { @@ -3960,6 +3972,12 @@ "traits": { "smithy.api#documentation": "

The identifier of a customer managed key.

" } + }, + "networkType": { + "target": "com.amazonaws.m2#NetworkType", + "traits": { + "smithy.api#documentation": "

The network type supported by the runtime environment.

" + } } } }, @@ -5289,6 +5307,21 @@ } } }, + "com.amazonaws.m2#NetworkType": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "ipv4", + "name": "IPV4" + }, + { + "value": "dual", + "name": "DUAL" + } + ] + } + }, "com.amazonaws.m2#NextToken": { "type": "string", "traits": { diff --git a/models/mediaconnect.json b/models/mediaconnect.json index 69a8937f51..191555a3fb 100644 --- a/models/mediaconnect.json +++ b/models/mediaconnect.json @@ -137,6 +137,12 @@ "smithy.api#required": {} } }, + "MulticastSourceSettings": { + "target": "com.amazonaws.mediaconnect#MulticastSourceSettings", + "traits": { + "smithy.api#jsonName": "multicastSourceSettings" + } + }, "Name": { "target": "com.amazonaws.mediaconnect#__string", "traits": { @@ -1276,6 +1282,12 @@ "smithy.api#required": {} } }, + "MulticastSourceSettings": { + "target": "com.amazonaws.mediaconnect#MulticastSourceSettings", + "traits": { + "smithy.api#jsonName": "multicastSourceSettings" + } + }, "Name": { "target": "com.amazonaws.mediaconnect#__string", "traits": { @@ -6594,6 +6606,21 @@ "smithy.api#documentation": "The settings for source monitoring." } }, + "com.amazonaws.mediaconnect#MulticastSourceSettings": { + "type": "structure", + "members": { + "MulticastSourceIp": { + "target": "com.amazonaws.mediaconnect#__string", + "traits": { + "smithy.api#documentation": "The IP address of the source for source-specific multicast (SSM).", + "smithy.api#jsonName": "multicastSourceIp" + } + } + }, + "traits": { + "smithy.api#documentation": "The settings related to the multicast source." 
+ } + }, "com.amazonaws.mediaconnect#NetworkInterfaceType": { "type": "enum", "members": { @@ -9006,6 +9033,12 @@ "smithy.api#jsonName": "multicastIp" } }, + "MulticastSourceSettings": { + "target": "com.amazonaws.mediaconnect#MulticastSourceSettings", + "traits": { + "smithy.api#jsonName": "multicastSourceSettings" + } + }, "NetworkName": { "target": "com.amazonaws.mediaconnect#__string", "traits": { diff --git a/models/medialive.json b/models/medialive.json index ee2f344e0b..4796ffa365 100644 --- a/models/medialive.json +++ b/models/medialive.json @@ -4478,6 +4478,34 @@ "smithy.api#documentation": "Number of milliseconds to delay the output from the second pipeline.", "smithy.api#jsonName": "sendDelayMs" } + }, + "KlvBehavior": { + "target": "com.amazonaws.medialive#CmafKLVBehavior", + "traits": { + "smithy.api#documentation": "If set to passthrough, passes any KLV data from the input source to this output.", + "smithy.api#jsonName": "klvBehavior" + } + }, + "KlvNameModifier": { + "target": "com.amazonaws.medialive#__stringMax100", + "traits": { + "smithy.api#documentation": "Change the modifier that MediaLive automatically adds to the Streams() name that identifies a KLV track. The default is \"klv\", which means the default name will be Streams(klv.cmfm). Any string you enter here will replace the \"klv\" string.\\nThe modifier can only contain: numbers, letters, plus (+), minus (-), underscore (_) and period (.) and has a maximum length of 100 characters.", + "smithy.api#jsonName": "klvNameModifier" + } + }, + "NielsenId3NameModifier": { + "target": "com.amazonaws.medialive#__stringMax100", + "traits": { + "smithy.api#documentation": "Change the modifier that MediaLive automatically adds to the Streams() name that identifies a Nielsen ID3 track. The default is \"nid3\", which means the default name will be Streams(nid3.cmfm). 
Any string you enter here will replace the \"nid3\" string.\\nThe modifier can only contain: numbers, letters, plus (+), minus (-), underscore (_) and period (.) and has a maximum length of 100 characters.", + "smithy.api#jsonName": "nielsenId3NameModifier" + } + }, + "Scte35NameModifier": { + "target": "com.amazonaws.medialive#__stringMax100", + "traits": { + "smithy.api#documentation": "Change the modifier that MediaLive automatically adds to the Streams() name for a SCTE 35 track. The default is \"scte\", which means the default name will be Streams(scte.cmfm). Any string you enter here will replace the \"scte\" string.\\nThe modifier can only contain: numbers, letters, plus (+), minus (-), underscore (_) and period (.) and has a maximum length of 100 characters.", + "smithy.api#jsonName": "scte35NameModifier" + } } }, "traits": { @@ -4519,6 +4547,26 @@ "smithy.api#documentation": "Cmaf Ingest Segment Length Units" } }, + "com.amazonaws.medialive#CmafKLVBehavior": { + "type": "enum", + "members": { + "NO_PASSTHROUGH": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "NO_PASSTHROUGH" + } + }, + "PASSTHROUGH": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "PASSTHROUGH" + } + } + }, + "traits": { + "smithy.api#documentation": "Cmaf KLVBehavior" + } + }, "com.amazonaws.medialive#CmafNielsenId3Behavior": { "type": "enum", "members": { @@ -14195,7 +14243,7 @@ "TimedMetadataBehavior": { "target": "com.amazonaws.medialive#Fmp4TimedMetadataBehavior", "traits": { - "smithy.api#documentation": "When set to passthrough, timed metadata is passed through from input to output.", + "smithy.api#documentation": "Set to PASSTHROUGH to enable ID3 metadata insertion. 
To include metadata, you configure other parameters in the output group or individual outputs, or you add an ID3 action to the channel schedule.", "smithy.api#jsonName": "timedMetadataBehavior" } } @@ -16518,6 +16566,26 @@ "smithy.api#documentation": "H265 Color Space Settings" } }, + "com.amazonaws.medialive#H265Deblocking": { + "type": "enum", + "members": { + "DISABLED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "DISABLED" + } + }, + "ENABLED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ENABLED" + } + } + }, + "traits": { + "smithy.api#documentation": "H265 Deblocking" + } + }, "com.amazonaws.medialive#H265FilterSettings": { "type": "structure", "members": { @@ -17087,6 +17155,13 @@ "smithy.api#documentation": "Sets the minimum QP. If you aren't familiar with quantization adjustment, leave the field empty. MediaLive will\napply an appropriate value.", "smithy.api#jsonName": "minQp" } + }, + "Deblocking": { + "target": "com.amazonaws.medialive#H265Deblocking", + "traits": { + "smithy.api#documentation": "Enable or disable the deblocking filter for this codec. The filter reduces blocking artifacts at block boundaries,\nwhich improves overall video quality. If the filter is disabled, visible block edges might appear in the output,\nespecially at lower bitrates.", + "smithy.api#jsonName": "deblocking" + } } }, "traits": { @@ -17835,20 +17910,20 @@ "Tag": { "target": "com.amazonaws.medialive#__string", "traits": { - "smithy.api#documentation": "ID3 tag to insert into each segment. Supports special keyword identifiers to substitute in segment-related values.\\nSupported keyword identifiers: https://docs.aws.amazon.com/medialive/latest/ug/variable-data-identifiers.html", + "smithy.api#documentation": "Complete this parameter if you want to specify only the metadata, not the entire frame. MediaLive will insert the metadata in a TXXX frame. Enter the value as plain text. 
You can include standard MediaLive variable data such as the current segment number.", "smithy.api#jsonName": "tag" } }, "Id3": { "target": "com.amazonaws.medialive#__string", "traits": { - "smithy.api#documentation": "Base64 string formatted according to the ID3 specification: http://id3.org/id3v2.4.0-structure", + "smithy.api#documentation": "Complete this parameter if you want to specify the entire ID3 metadata. Enter a base64 string that contains one or more fully formed ID3 tags, according to the ID3 specification: http://id3.org/id3v2.4.0-structure", "smithy.api#jsonName": "id3" } } }, "traits": { - "smithy.api#documentation": "Settings for the action to insert a user-defined ID3 tag in each HLS segment" + "smithy.api#documentation": "Settings for the action to insert ID3 metadata in every segment, in HLS output groups." } }, "com.amazonaws.medialive#HlsId3SegmentTaggingState": { @@ -18355,14 +18430,14 @@ "target": "com.amazonaws.medialive#__string", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "Base64 string formatted according to the ID3 specification: http://id3.org/id3v2.4.0-structure", + "smithy.api#documentation": "Enter a base64 string that contains one or more fully formed ID3 tags.See the ID3 specification: http://id3.org/id3v2.4.0-structure", "smithy.api#jsonName": "id3", "smithy.api#required": {} } } }, "traits": { - "smithy.api#documentation": "Settings for the action to emit HLS metadata" + "smithy.api#documentation": "Settings for the action to insert ID3 metadata (as a one-time action) in HLS output groups." } }, "com.amazonaws.medialive#HlsTsFileMode": { @@ -23674,7 +23749,7 @@ "TimedMetadataBehavior": { "target": "com.amazonaws.medialive#M3u8TimedMetadataBehavior", "traits": { - "smithy.api#documentation": "When set to passthrough, timed metadata is passed through from input to output.", + "smithy.api#documentation": "Set to PASSTHROUGH to enable ID3 metadata insertion. 
To include metadata, you configure other parameters in the output group or individual outputs, or you add an ID3 action to the channel schedule.", "smithy.api#jsonName": "timedMetadataBehavior" } }, @@ -25130,6 +25205,20 @@ "smithy.api#documentation": "ID of the channel in MediaPackage that is the destination for this output group. You do not need to specify the individual inputs in MediaPackage; MediaLive will handle the connection of the two MediaLive pipelines to the two MediaPackage inputs. The MediaPackage channel and MediaLive channel must be in the same region.", "smithy.api#jsonName": "channelId" } + }, + "ChannelGroup": { + "target": "com.amazonaws.medialive#__stringMin1", + "traits": { + "smithy.api#documentation": "Name of the channel group in MediaPackageV2. Only use if you are sending CMAF Ingest output to a CMAF ingest endpoint on a MediaPackage channel that uses MediaPackage v2.", + "smithy.api#jsonName": "channelGroup" + } + }, + "ChannelName": { + "target": "com.amazonaws.medialive#__stringMin1", + "traits": { + "smithy.api#documentation": "Name of the channel in MediaPackageV2. 
Only use if you are sending CMAF Ingest output to a CMAF ingest endpoint on a MediaPackage channel that uses MediaPackage v2.", + "smithy.api#jsonName": "channelName" + } } }, "traits": { @@ -29335,14 +29424,14 @@ "HlsId3SegmentTaggingSettings": { "target": "com.amazonaws.medialive#HlsId3SegmentTaggingScheduleActionSettings", "traits": { - "smithy.api#documentation": "Action to insert HLS ID3 segment tagging", + "smithy.api#documentation": "Action to insert ID3 metadata in every segment, in HLS output groups", "smithy.api#jsonName": "hlsId3SegmentTaggingSettings" } }, "HlsTimedMetadataSettings": { "target": "com.amazonaws.medialive#HlsTimedMetadataScheduleActionSettings", "traits": { - "smithy.api#documentation": "Action to insert HLS metadata", + "smithy.api#documentation": "Action to insert ID3 metadata once, in HLS output groups", "smithy.api#jsonName": "hlsTimedMetadataSettings" } }, @@ -35050,6 +35139,10 @@ "Tags": { "target": "com.amazonaws.medialive#Tags", "traits": { + "smithy.api#deprecated": { + "since": "2024-11-20", + "message": "This API is deprecated. You must use UpdateTagsForResource instead." 
+ }, "smithy.api#documentation": "A collection of key-value pairs.", "smithy.api#jsonName": "tags" } @@ -37839,6 +37932,16 @@ "smithy.api#documentation": "Placeholder documentation for __string" } }, + "com.amazonaws.medialive#__stringMax100": { + "type": "string", + "traits": { + "smithy.api#documentation": "Placeholder documentation for __stringMax100", + "smithy.api#length": { + "min": 0, + "max": 100 + } + } + }, "com.amazonaws.medialive#__stringMax1000": { "type": "string", "traits": { diff --git a/models/migration-hub.json b/models/migration-hub.json index 3d9f9a2bae..a10fc66f2f 100644 --- a/models/migration-hub.json +++ b/models/migration-hub.json @@ -39,6 +39,9 @@ { "target": "com.amazonaws.migrationhub#AssociateDiscoveredResource" }, + { + "target": "com.amazonaws.migrationhub#AssociateSourceResource" + }, { "target": "com.amazonaws.migrationhub#CreateProgressUpdateStream" }, @@ -57,6 +60,9 @@ { "target": "com.amazonaws.migrationhub#DisassociateDiscoveredResource" }, + { + "target": "com.amazonaws.migrationhub#DisassociateSourceResource" + }, { "target": "com.amazonaws.migrationhub#ImportMigrationTask" }, @@ -72,9 +78,15 @@ { "target": "com.amazonaws.migrationhub#ListMigrationTasks" }, + { + "target": "com.amazonaws.migrationhub#ListMigrationTaskUpdates" + }, { "target": "com.amazonaws.migrationhub#ListProgressUpdateStreams" }, + { + "target": "com.amazonaws.migrationhub#ListSourceResources" + }, { "target": "com.amazonaws.migrationhub#NotifyApplicationState" }, @@ -142,7 +154,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -185,7 +196,8 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -198,7 +210,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -212,7 +223,6 @@ "assign": "PartitionResult" } ], - "type": "tree", "rules": [ { "conditions": [ @@ -235,7 +245,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -270,7 +279,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -281,14 
+289,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "FIPS and DualStack are enabled, but this partition does not support one or both", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -302,14 +312,12 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ - true, { "fn": "getAttr", "argv": [ @@ -318,11 +326,11 @@ }, "supportsFIPS" ] - } + }, + true ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -333,14 +341,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "FIPS is enabled but this partition does not support FIPS", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -354,7 +364,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -374,7 +383,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -385,14 +393,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "DualStack is enabled but this partition does not support DualStack", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [], @@ -403,9 +413,11 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" } - ] + ], + "type": "tree" }, { "conditions": [], @@ -1074,6 +1086,87 @@ "smithy.api#output": {} } }, + "com.amazonaws.migrationhub#AssociateSourceResource": { + "type": "operation", + "input": { + "target": "com.amazonaws.migrationhub#AssociateSourceResourceRequest" + }, + "output": { + "target": "com.amazonaws.migrationhub#AssociateSourceResourceResult" + }, + "errors": [ + { + "target": "com.amazonaws.migrationhub#AccessDeniedException" + }, + { + "target": "com.amazonaws.migrationhub#DryRunOperation" + }, + { + "target": "com.amazonaws.migrationhub#InternalServerError" + }, + { + "target": "com.amazonaws.migrationhub#InvalidInputException" + }, + { + "target": "com.amazonaws.migrationhub#ResourceNotFoundException" + }, + { + "target": 
"com.amazonaws.migrationhub#ServiceUnavailableException" + }, + { + "target": "com.amazonaws.migrationhub#ThrottlingException" + }, + { + "target": "com.amazonaws.migrationhub#UnauthorizedOperation" + } + ], + "traits": { + "smithy.api#documentation": "

Associates a source resource with a migration task. For example, the source resource can\n be a source server, an application, or a migration wave.

" + } + }, + "com.amazonaws.migrationhub#AssociateSourceResourceRequest": { + "type": "structure", + "members": { + "ProgressUpdateStream": { + "target": "com.amazonaws.migrationhub#ProgressUpdateStream", + "traits": { + "smithy.api#documentation": "

The name of the progress-update stream, which is used for access control as well as a\n namespace for migration-task names that is implicitly linked to your AWS account. The\n progress-update stream must uniquely identify the migration tool as it is used for all\n updates made by the tool; however, it does not need to be unique for each AWS account\n because it is scoped to the AWS account.

", + "smithy.api#required": {} + } + }, + "MigrationTaskName": { + "target": "com.amazonaws.migrationhub#MigrationTaskName", + "traits": { + "smithy.api#documentation": "

A unique identifier that references the migration task. Do not include\n sensitive data in this field.\n

", + "smithy.api#required": {} + } + }, + "SourceResource": { + "target": "com.amazonaws.migrationhub#SourceResource", + "traits": { + "smithy.api#documentation": "

The source resource that you want to associate.

", + "smithy.api#required": {} + } + }, + "DryRun": { + "target": "com.amazonaws.migrationhub#DryRun", + "traits": { + "smithy.api#default": false, + "smithy.api#documentation": "

This is an optional parameter that you can use to test whether the call will succeed.\n Set this parameter to true to verify that you have the permissions that are\n required to make the call, and that you have specified the other parameters in the call\n correctly.

" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.migrationhub#AssociateSourceResourceResult": { + "type": "structure", + "members": {}, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.migrationhub#ConfigurationId": { "type": "string", "traits": { @@ -1580,6 +1673,87 @@ "smithy.api#output": {} } }, + "com.amazonaws.migrationhub#DisassociateSourceResource": { + "type": "operation", + "input": { + "target": "com.amazonaws.migrationhub#DisassociateSourceResourceRequest" + }, + "output": { + "target": "com.amazonaws.migrationhub#DisassociateSourceResourceResult" + }, + "errors": [ + { + "target": "com.amazonaws.migrationhub#AccessDeniedException" + }, + { + "target": "com.amazonaws.migrationhub#DryRunOperation" + }, + { + "target": "com.amazonaws.migrationhub#InternalServerError" + }, + { + "target": "com.amazonaws.migrationhub#InvalidInputException" + }, + { + "target": "com.amazonaws.migrationhub#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.migrationhub#ServiceUnavailableException" + }, + { + "target": "com.amazonaws.migrationhub#ThrottlingException" + }, + { + "target": "com.amazonaws.migrationhub#UnauthorizedOperation" + } + ], + "traits": { + "smithy.api#documentation": "

Removes the association between a source resource and a migration task.

" + } + }, + "com.amazonaws.migrationhub#DisassociateSourceResourceRequest": { + "type": "structure", + "members": { + "ProgressUpdateStream": { + "target": "com.amazonaws.migrationhub#ProgressUpdateStream", + "traits": { + "smithy.api#documentation": "

The name of the progress-update stream, which is used for access control as well as a\n namespace for migration-task names that is implicitly linked to your AWS account. The\n progress-update stream must uniquely identify the migration tool as it is used for all\n updates made by the tool; however, it does not need to be unique for each AWS account\n because it is scoped to the AWS account.

", + "smithy.api#required": {} + } + }, + "MigrationTaskName": { + "target": "com.amazonaws.migrationhub#MigrationTaskName", + "traits": { + "smithy.api#documentation": "

A unique identifier that references the migration task. Do not include\n sensitive data in this field.\n

", + "smithy.api#required": {} + } + }, + "SourceResourceName": { + "target": "com.amazonaws.migrationhub#SourceResourceName", + "traits": { + "smithy.api#documentation": "

The name that was specified for the source resource.

", + "smithy.api#required": {} + } + }, + "DryRun": { + "target": "com.amazonaws.migrationhub#DryRun", + "traits": { + "smithy.api#default": false, + "smithy.api#documentation": "

This is an optional parameter that you can use to test whether the call will succeed.\n Set this parameter to true to verify that you have the permissions that are\n required to make the call, and that you have specified the other parameters in the call\n correctly.

" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.migrationhub#DisassociateSourceResourceResult": { + "type": "structure", + "members": {}, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.migrationhub#DiscoveredResource": { "type": "structure", "members": { @@ -2037,6 +2211,98 @@ "smithy.api#output": {} } }, + "com.amazonaws.migrationhub#ListMigrationTaskUpdates": { + "type": "operation", + "input": { + "target": "com.amazonaws.migrationhub#ListMigrationTaskUpdatesRequest" + }, + "output": { + "target": "com.amazonaws.migrationhub#ListMigrationTaskUpdatesResult" + }, + "errors": [ + { + "target": "com.amazonaws.migrationhub#AccessDeniedException" + }, + { + "target": "com.amazonaws.migrationhub#InternalServerError" + }, + { + "target": "com.amazonaws.migrationhub#InvalidInputException" + }, + { + "target": "com.amazonaws.migrationhub#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.migrationhub#ServiceUnavailableException" + }, + { + "target": "com.amazonaws.migrationhub#ThrottlingException" + } + ], + "traits": { + "smithy.api#documentation": "

This is a paginated API that returns all the migration-task states for the specified\n MigrationTaskName and ProgressUpdateStream.

", + "smithy.api#paginated": { + "inputToken": "NextToken", + "outputToken": "NextToken", + "items": "MigrationTaskUpdateList", + "pageSize": "MaxResults" + } + } + }, + "com.amazonaws.migrationhub#ListMigrationTaskUpdatesRequest": { + "type": "structure", + "members": { + "ProgressUpdateStream": { + "target": "com.amazonaws.migrationhub#ProgressUpdateStream", + "traits": { + "smithy.api#documentation": "

The name of the progress-update stream, which is used for access control as well as a\n namespace for migration-task names that is implicitly linked to your AWS account. The\n progress-update stream must uniquely identify the migration tool as it is used for all\n updates made by the tool; however, it does not need to be unique for each AWS account\n because it is scoped to the AWS account.

", + "smithy.api#required": {} + } + }, + "MigrationTaskName": { + "target": "com.amazonaws.migrationhub#MigrationTaskName", + "traits": { + "smithy.api#documentation": "

A unique identifier that references the migration task. Do not include\n sensitive data in this field.\n

", + "smithy.api#required": {} + } + }, + "NextToken": { + "target": "com.amazonaws.migrationhub#Token", + "traits": { + "smithy.api#documentation": "

If NextToken was returned by a previous call, there are more results\n available. The value of NextToken is a unique pagination token for each page.\n To retrieve the next page of results, specify the NextToken value that the\n previous call returned. Keep all other arguments unchanged. Each pagination token expires\n after 24 hours. Using an expired pagination token will return an HTTP 400 InvalidToken\n error.

" + } + }, + "MaxResults": { + "target": "com.amazonaws.migrationhub#MaxResults", + "traits": { + "smithy.api#documentation": "

The maximum number of results to include in the response. If more results exist than the\n value that you specify here for MaxResults, the response will include a token\n that you can use to retrieve the next set of results.

" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.migrationhub#ListMigrationTaskUpdatesResult": { + "type": "structure", + "members": { + "NextToken": { + "target": "com.amazonaws.migrationhub#Token", + "traits": { + "smithy.api#documentation": "

If the response includes a NextToken value, that means that there are more\n results available. The value of NextToken is a unique pagination token for\n each page. To retrieve the next page of results, call this API again and specify this\n NextToken value in the request. Keep all other arguments unchanged. Each\n pagination token expires after 24 hours. Using an expired pagination token will return an\n HTTP 400 InvalidToken error.

" + } + }, + "MigrationTaskUpdateList": { + "target": "com.amazonaws.migrationhub#MigrationTaskUpdateList", + "traits": { + "smithy.api#documentation": "

The list of migration-task updates.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.migrationhub#ListMigrationTasks": { "type": "operation", "input": { @@ -2205,6 +2471,98 @@ "smithy.api#output": {} } }, + "com.amazonaws.migrationhub#ListSourceResources": { + "type": "operation", + "input": { + "target": "com.amazonaws.migrationhub#ListSourceResourcesRequest" + }, + "output": { + "target": "com.amazonaws.migrationhub#ListSourceResourcesResult" + }, + "errors": [ + { + "target": "com.amazonaws.migrationhub#AccessDeniedException" + }, + { + "target": "com.amazonaws.migrationhub#InternalServerError" + }, + { + "target": "com.amazonaws.migrationhub#InvalidInputException" + }, + { + "target": "com.amazonaws.migrationhub#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.migrationhub#ServiceUnavailableException" + }, + { + "target": "com.amazonaws.migrationhub#ThrottlingException" + } + ], + "traits": { + "smithy.api#documentation": "

Lists all the source resources that are associated with the specified\n MigrationTaskName and ProgressUpdateStream.

", + "smithy.api#paginated": { + "inputToken": "NextToken", + "outputToken": "NextToken", + "items": "SourceResourceList", + "pageSize": "MaxResults" + } + } + }, + "com.amazonaws.migrationhub#ListSourceResourcesRequest": { + "type": "structure", + "members": { + "ProgressUpdateStream": { + "target": "com.amazonaws.migrationhub#ProgressUpdateStream", + "traits": { + "smithy.api#documentation": "

The name of the progress-update stream, which is used for access control as well as a\n namespace for migration-task names that is implicitly linked to your AWS account. The\n progress-update stream must uniquely identify the migration tool as it is used for all\n updates made by the tool; however, it does not need to be unique for each AWS account\n because it is scoped to the AWS account.

", + "smithy.api#required": {} + } + }, + "MigrationTaskName": { + "target": "com.amazonaws.migrationhub#MigrationTaskName", + "traits": { + "smithy.api#documentation": "

A unique identifier that references the migration task. Do not include\n sensitive data in this field.\n

", + "smithy.api#required": {} + } + }, + "NextToken": { + "target": "com.amazonaws.migrationhub#Token", + "traits": { + "smithy.api#documentation": "

If NextToken was returned by a previous call, there are more results\n available. The value of NextToken is a unique pagination token for each page.\n To retrieve the next page of results, specify the NextToken value that the\n previous call returned. Keep all other arguments unchanged. Each pagination token expires\n after 24 hours. Using an expired pagination token will return an HTTP 400 InvalidToken\n error.

" + } + }, + "MaxResults": { + "target": "com.amazonaws.migrationhub#MaxResultsSourceResources", + "traits": { + "smithy.api#documentation": "

The maximum number of results to include in the response. If more results exist than the\n value that you specify here for MaxResults, the response will include a token\n that you can use to retrieve the next set of results.

" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.migrationhub#ListSourceResourcesResult": { + "type": "structure", + "members": { + "NextToken": { + "target": "com.amazonaws.migrationhub#Token", + "traits": { + "smithy.api#documentation": "

If the response includes a NextToken value, that means that there are more\n results available. The value of NextToken is a unique pagination token for\n each page. To retrieve the next page of results, call this API again and specify this\n NextToken value in the request. Keep all other arguments unchanged. Each\n pagination token expires after 24 hours. Using an expired pagination token will return an\n HTTP 400 InvalidToken error.

" + } + }, + "SourceResourceList": { + "target": "com.amazonaws.migrationhub#SourceResourceList", + "traits": { + "smithy.api#documentation": "

The list of source resources.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.migrationhub#MaxResults": { "type": "integer", "traits": { @@ -2232,6 +2590,15 @@ } } }, + "com.amazonaws.migrationhub#MaxResultsSourceResources": { + "type": "integer", + "traits": { + "smithy.api#range": { + "min": 1, + "max": 10 + } + } + }, "com.amazonaws.migrationhub#MigrationTask": { "type": "structure", "members": { @@ -2330,6 +2697,35 @@ "target": "com.amazonaws.migrationhub#MigrationTaskSummary" } }, + "com.amazonaws.migrationhub#MigrationTaskUpdate": { + "type": "structure", + "members": { + "UpdateDateTime": { + "target": "com.amazonaws.migrationhub#UpdateDateTime", + "traits": { + "smithy.api#documentation": "

The timestamp for the update.

" + } + }, + "UpdateType": { + "target": "com.amazonaws.migrationhub#UpdateType", + "traits": { + "smithy.api#documentation": "

The type of the update.

" + } + }, + "MigrationTaskState": { + "target": "com.amazonaws.migrationhub#Task" + } + }, + "traits": { + "smithy.api#documentation": "

A migration-task progress update.

" + } + }, + "com.amazonaws.migrationhub#MigrationTaskUpdateList": { + "type": "list", + "member": { + "target": "com.amazonaws.migrationhub#MigrationTaskUpdate" + } + }, "com.amazonaws.migrationhub#NextUpdateSeconds": { "type": "integer", "traits": { @@ -2613,7 +3009,7 @@ } ], "traits": { - "smithy.api#documentation": "

Provides identifying details of the resource being migrated so that it can be associated\n in the Application Discovery Service repository. This association occurs asynchronously\n after PutResourceAttributes returns.

\n \n
    \n
  • \n

    Keep in mind that subsequent calls to PutResourceAttributes will override\n previously stored attributes. For example, if it is first called with a MAC\n address, but later, it is desired to add an IP address, it\n will then be required to call it with both the IP and MAC\n addresses to prevent overriding the MAC address.

    \n
  • \n
  • \n

    Note the instructions regarding the special use case of the \n ResourceAttributeList\n parameter when specifying any\n \"VM\" related value.

    \n
  • \n
\n
\n\n \n

Because this is an asynchronous call, it will always return 200, whether an\n association occurs or not. To confirm if an association was found based on the provided\n details, call ListDiscoveredResources.

\n
" + "smithy.api#documentation": "

Provides identifying details of the resource being migrated so that it can be associated\n in the Application Discovery Service repository. This association occurs asynchronously\n after PutResourceAttributes returns.

\n \n
    \n
  • \n

    Keep in mind that subsequent calls to PutResourceAttributes will override\n previously stored attributes. For example, if it is first called with a MAC\n address, but later, it is desired to add an IP address, it\n will then be required to call it with both the IP and MAC\n addresses to prevent overriding the MAC address.

    \n
  • \n
  • \n

    Note the instructions regarding the special use case of the \n ResourceAttributeList\n parameter when specifying any\n \"VM\" related value.

    \n
  • \n
\n
\n \n

Because this is an asynchronous call, it will always return 200, whether an\n association occurs or not. To confirm if an association was found based on the provided\n details, call ListDiscoveredResources.

\n
" } }, "com.amazonaws.migrationhub#PutResourceAttributesRequest": { @@ -2636,7 +3032,7 @@ "ResourceAttributeList": { "target": "com.amazonaws.migrationhub#ResourceAttributeList", "traits": { - "smithy.api#documentation": "

Information about the resource that is being migrated. This data will be used to map the\n task to a resource in the Application Discovery Service repository.

\n \n

Takes the object array of ResourceAttribute where the Type\n field is reserved for the following values: IPV4_ADDRESS | IPV6_ADDRESS |\n MAC_ADDRESS | FQDN | VM_MANAGER_ID | VM_MANAGED_OBJECT_REFERENCE | VM_NAME | VM_PATH\n | BIOS_ID | MOTHERBOARD_SERIAL_NUMBER where the identifying value can be a\n string up to 256 characters.

\n
\n \n
    \n
  • \n\n

    If any \"VM\" related value is set for a ResourceAttribute object,\n it is required that VM_MANAGER_ID, as a minimum, is always set. If\n VM_MANAGER_ID is not set, then all \"VM\" fields will be discarded\n and \"VM\" fields will not be used for matching the migration task to a server in\n Application Discovery Service repository. See the Example section below for a use case of specifying \"VM\" related\n values.

    \n
  • \n
  • \n

    If a server you are trying to match has multiple IP or MAC addresses, you\n should provide as many as you know in separate type/value pairs passed to the\n ResourceAttributeList parameter to maximize the chances of\n matching.

    \n
  • \n
\n
", + "smithy.api#documentation": "

Information about the resource that is being migrated. This data will be used to map the\n task to a resource in the Application Discovery Service repository.

\n \n

Takes the object array of ResourceAttribute where the Type\n field is reserved for the following values: IPV4_ADDRESS | IPV6_ADDRESS |\n MAC_ADDRESS | FQDN | VM_MANAGER_ID | VM_MANAGED_OBJECT_REFERENCE | VM_NAME | VM_PATH\n | BIOS_ID | MOTHERBOARD_SERIAL_NUMBER where the identifying value can be a\n string up to 256 characters.

\n
\n \n
    \n
  • \n

    If any \"VM\" related value is set for a ResourceAttribute object,\n it is required that VM_MANAGER_ID, as a minimum, is always set. If\n VM_MANAGER_ID is not set, then all \"VM\" fields will be discarded\n and \"VM\" fields will not be used for matching the migration task to a server in\n Application Discovery Service repository. See the Example section below for a use case of specifying \"VM\" related\n values.

    \n
  • \n
  • \n

    If a server you are trying to match has multiple IP or MAC addresses, you\n should provide as many as you know in separate type/value pairs passed to the\n ResourceAttributeList parameter to maximize the chances of\n matching.

    \n
  • \n
\n
", "smithy.api#required": {} } }, @@ -2678,7 +3074,7 @@ } }, "traits": { - "smithy.api#documentation": "

Attribute associated with a resource.

\n

Note the corresponding format required per type listed below:

\n\n\n
\n
IPV4
\n
\n

\n x.x.x.x\n

\n

\n where x is an integer in the range [0,255]\n

\n
\n
IPV6
\n
\n

\n y : y : y : y : y : y : y : y\n

\n

\n where y is a hexadecimal between 0 and FFFF. [0,\n FFFF]\n

\n
\n
MAC_ADDRESS
\n
\n

\n ^([0-9A-Fa-f]{2}[:-]){5}([0-9A-Fa-f]{2})$\n

\n
\n
FQDN
\n
\n

\n ^[^<>{}\\\\\\\\/?,=\\\\p{Cntrl}]{1,256}$\n

\n
\n
" + "smithy.api#documentation": "

Attribute associated with a resource.

\n

Note the corresponding format required per type listed below:

\n
\n
IPV4
\n
\n

\n x.x.x.x\n

\n

\n where x is an integer in the range [0,255]\n

\n
\n
IPV6
\n
\n

\n y : y : y : y : y : y : y : y\n

\n

\n where y is a hexadecimal between 0 and FFFF. [0,\n FFFF]\n

\n
\n
MAC_ADDRESS
\n
\n

\n ^([0-9A-Fa-f]{2}[:-]){5}([0-9A-Fa-f]{2})$\n

\n
\n
FQDN
\n
\n

\n ^[^<>{}\\\\\\\\/?,=\\\\p{Cntrl}]{1,256}$\n

\n
\n
" } }, "com.amazonaws.migrationhub#ResourceAttributeList": { @@ -2808,6 +3204,58 @@ "smithy.api#error": "server" } }, + "com.amazonaws.migrationhub#SourceResource": { + "type": "structure", + "members": { + "Name": { + "target": "com.amazonaws.migrationhub#SourceResourceName", + "traits": { + "smithy.api#documentation": "

This is the name that you want to use to identify the resource. If the resource is an\n AWS resource, we recommend that you set this parameter to the ARN of the resource.

", + "smithy.api#required": {} + } + }, + "Description": { + "target": "com.amazonaws.migrationhub#SourceResourceDescription", + "traits": { + "smithy.api#documentation": "

A description that can be free-form text to record additional detail about the resource\n for clarity or later reference.

" + } + }, + "StatusDetail": { + "target": "com.amazonaws.migrationhub#StatusDetail", + "traits": { + "smithy.api#documentation": "

A free-form description of the status of the resource.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

A source resource can be a source server, a migration wave, an application, or any other\n resource that you track.

" + } + }, + "com.amazonaws.migrationhub#SourceResourceDescription": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 0, + "max": 500 + }, + "smithy.api#pattern": "^.{0,500}$" + } + }, + "com.amazonaws.migrationhub#SourceResourceList": { + "type": "list", + "member": { + "target": "com.amazonaws.migrationhub#SourceResource" + } + }, + "com.amazonaws.migrationhub#SourceResourceName": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 1600 + } + } + }, "com.amazonaws.migrationhub#Status": { "type": "enum", "members": { @@ -2842,9 +3290,9 @@ "traits": { "smithy.api#length": { "min": 0, - "max": 500 + "max": 2500 }, - "smithy.api#pattern": "^.{0,500}$" + "smithy.api#pattern": "^.{0,2500}$" } }, "com.amazonaws.migrationhub#Task": { @@ -2923,6 +3371,17 @@ }, "com.amazonaws.migrationhub#UpdateDateTime": { "type": "timestamp" + }, + "com.amazonaws.migrationhub#UpdateType": { + "type": "enum", + "members": { + "MigrationTaskStateUpdated": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "MIGRATION_TASK_STATE_UPDATED" + } + } + } } } } diff --git a/models/networkmanager.json b/models/networkmanager.json index 8000d04bff..2019fcd2fb 100644 --- a/models/networkmanager.json +++ b/models/networkmanager.json @@ -12880,7 +12880,7 @@ "EdgeLocations": { "target": "com.amazonaws.networkmanager#ExternalRegionCodeList", "traits": { - "smithy.api#documentation": "

One or more edge locations to update for the Direct Connect gateway attachment. The updated array of edge locations overwrites the previous array of locations. EdgeLocations is only used for Direct Connect gateway attachments. Do

" + "smithy.api#documentation": "

One or more edge locations to update for the Direct Connect gateway attachment. The updated array of edge locations overwrites the previous array of locations. EdgeLocations is only used for Direct Connect gateway attachments.

" } } }, diff --git a/models/rds.json b/models/rds.json index 1fde890c40..7906ba4e51 100644 --- a/models/rds.json +++ b/models/rds.json @@ -2874,6 +2874,12 @@ "smithy.api#enumValue": "MYSQL_NATIVE_PASSWORD" } }, + "MYSQL_CACHING_SHA2_PASSWORD": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "MYSQL_CACHING_SHA2_PASSWORD" + } + }, "POSTGRES_SCRAM_SHA_256": { "target": "smithy.api#Unit", "traits": { diff --git a/models/route-53-domains.json b/models/route-53-domains.json index e78189dd06..01bf154947 100644 --- a/models/route-53-domains.json +++ b/models/route-53-domains.json @@ -3638,10 +3638,7 @@ "com.amazonaws.route53domains#LangCode": { "type": "string", "traits": { - "smithy.api#length": { - "min": 0, - "max": 3 - } + "smithy.api#pattern": "^|[A-Za-z]{2,3}$" } }, "com.amazonaws.route53domains#ListDomains": { @@ -4280,6 +4277,12 @@ "traits": { "smithy.api#enumValue": "TRANSFER_ON_RENEW" } + }, + "RESTORE_DOMAIN": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "RESTORE_DOMAIN" + } } } }, @@ -4291,7 +4294,7 @@ "traits": { "smithy.api#length": { "min": 0, - "max": 20 + "max": 21 } } }, @@ -4344,7 +4347,10 @@ "com.amazonaws.route53domains#Price": { "type": "double", "traits": { - "smithy.api#default": 0 + "smithy.api#default": 0, + "smithy.api#range": { + "min": 0.0 + } } }, "com.amazonaws.route53domains#PriceWithCurrency": { diff --git a/models/servicediscovery.json b/models/servicediscovery.json index 483b9e0671..6e5f392f14 100644 --- a/models/servicediscovery.json +++ b/models/servicediscovery.json @@ -636,7 +636,7 @@ } ], "traits": { - "smithy.api#documentation": "

Deletes a specified service. If the service still contains one or more registered instances, the request\n fails.

", + "smithy.api#documentation": "

Deletes a specified service and all associated service attributes. If the service still contains one or more registered instances, the request\n fails.

", "smithy.api#examples": [ { "title": "Example: Delete service", @@ -649,6 +649,68 @@ ] } }, + "com.amazonaws.servicediscovery#DeleteServiceAttributes": { + "type": "operation", + "input": { + "target": "com.amazonaws.servicediscovery#DeleteServiceAttributesRequest" + }, + "output": { + "target": "com.amazonaws.servicediscovery#DeleteServiceAttributesResponse" + }, + "errors": [ + { + "target": "com.amazonaws.servicediscovery#InvalidInput" + }, + { + "target": "com.amazonaws.servicediscovery#ServiceNotFound" + } + ], + "traits": { + "smithy.api#documentation": "

Deletes specific attributes associated with a service.

", + "smithy.api#examples": [ + { + "title": "DeleteServiceAttributes example", + "documentation": "Example: Delete service attribute by providing attribute key and service ID", + "input": { + "Attributes": [ + "port" + ], + "ServiceId": "srv-e4anhexample0004" + }, + "output": {} + } + ] + } + }, + "com.amazonaws.servicediscovery#DeleteServiceAttributesRequest": { + "type": "structure", + "members": { + "ServiceId": { + "target": "com.amazonaws.servicediscovery#ResourceId", + "traits": { + "smithy.api#documentation": "

The ID of the service from which the attributes will be deleted.

", + "smithy.api#required": {} + } + }, + "Attributes": { + "target": "com.amazonaws.servicediscovery#ServiceAttributeKeyList", + "traits": { + "smithy.api#documentation": "

A list of keys corresponding to each attribute that you want to delete.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.servicediscovery#DeleteServiceAttributesResponse": { + "type": "structure", + "members": {}, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.servicediscovery#DeleteServiceRequest": { "type": "structure", "members": { @@ -978,13 +1040,13 @@ "DnsRecords": { "target": "com.amazonaws.servicediscovery#DnsRecordList", "traits": { - "smithy.api#documentation": "

An array that contains one DnsRecord object for each Route 53 DNS record that you want Cloud Map\n to create when you register an instance.

", + "smithy.api#documentation": "

An array that contains one DnsRecord object for each Route 53 DNS record that you want Cloud Map\n to create when you register an instance.

\n \n

The record type of a service specified in a DnsRecord object can't be updated. To change a record type, you need to delete the service and recreate it with a new\n DnsConfig.

\n
", "smithy.api#required": {} } } }, "traits": { - "smithy.api#documentation": "

A complex type that contains information about the Amazon Route 53 DNS records that you want Cloud Map to create\n when you register an instance.

\n \n

The record types of a service can only be changed by deleting the service and recreating it with a new\n Dnsconfig.

\n
" + "smithy.api#documentation": "

A complex type that contains information about the Amazon Route 53 DNS records that you want Cloud Map to create\n when you register an instance.

" } }, "com.amazonaws.servicediscovery#DnsConfigChange": { @@ -1444,6 +1506,72 @@ "smithy.api#documentation": "

Gets the settings for a specified service.

" } }, + "com.amazonaws.servicediscovery#GetServiceAttributes": { + "type": "operation", + "input": { + "target": "com.amazonaws.servicediscovery#GetServiceAttributesRequest" + }, + "output": { + "target": "com.amazonaws.servicediscovery#GetServiceAttributesResponse" + }, + "errors": [ + { + "target": "com.amazonaws.servicediscovery#InvalidInput" + }, + { + "target": "com.amazonaws.servicediscovery#ServiceNotFound" + } + ], + "traits": { + "smithy.api#documentation": "

Returns the attributes associated with a specified service.

", + "smithy.api#examples": [ + { + "title": "GetServiceAttributes Example", + "documentation": "This example gets the attributes for a specified service.", + "input": { + "ServiceId": "srv-e4anhexample0004" + }, + "output": { + "ServiceAttributes": { + "Attributes": { + "port": "80" + }, + "ServiceArn": "arn:aws:servicediscovery:us-west-2:123456789012:service/srv-e4anhexample0004" + } + } + } + ] + } + }, + "com.amazonaws.servicediscovery#GetServiceAttributesRequest": { + "type": "structure", + "members": { + "ServiceId": { + "target": "com.amazonaws.servicediscovery#ResourceId", + "traits": { + "smithy.api#documentation": "

The ID of the service that you want to get attributes for.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.servicediscovery#GetServiceAttributesResponse": { + "type": "structure", + "members": { + "ServiceAttributes": { + "target": "com.amazonaws.servicediscovery#ServiceAttributes", + "traits": { + "smithy.api#documentation": "

A complex type that contains the service ARN and a list of attribute key-value pairs associated with the service.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.servicediscovery#GetServiceRequest": { "type": "structure", "members": { @@ -2424,7 +2552,8 @@ "smithy.api#length": { "min": 0, "max": 1024 - } + }, + "smithy.api#pattern": "^[!-~]{1,1024}$" } }, "com.amazonaws.servicediscovery#NamespaceNameHttp": { @@ -3267,6 +3396,9 @@ { "target": "com.amazonaws.servicediscovery#DeleteService" }, + { + "target": "com.amazonaws.servicediscovery#DeleteServiceAttributes" + }, { "target": "com.amazonaws.servicediscovery#DeregisterInstance" }, @@ -3291,6 +3423,9 @@ { "target": "com.amazonaws.servicediscovery#GetService" }, + { + "target": "com.amazonaws.servicediscovery#GetServiceAttributes" + }, { "target": "com.amazonaws.servicediscovery#ListInstances" }, @@ -3329,6 +3464,9 @@ }, { "target": "com.amazonaws.servicediscovery#UpdateService" + }, + { + "target": "com.amazonaws.servicediscovery#UpdateServiceAttributes" } ], "traits": { @@ -4540,6 +4678,84 @@ "smithy.api#httpError": 400 } }, + "com.amazonaws.servicediscovery#ServiceAttributeKey": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 0, + "max": 255 + } + } + }, + "com.amazonaws.servicediscovery#ServiceAttributeKeyList": { + "type": "list", + "member": { + "target": "com.amazonaws.servicediscovery#ServiceAttributeKey" + }, + "traits": { + "smithy.api#length": { + "min": 1, + "max": 30 + } + } + }, + "com.amazonaws.servicediscovery#ServiceAttributeValue": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 0, + "max": 1024 + } + } + }, + "com.amazonaws.servicediscovery#ServiceAttributes": { + "type": "structure", + "members": { + "ServiceArn": { + "target": "com.amazonaws.servicediscovery#Arn", + "traits": { + "smithy.api#documentation": "

The ARN of the service that the attributes are associated with.

" + } + }, + "Attributes": { + "target": "com.amazonaws.servicediscovery#ServiceAttributesMap", + "traits": { + "smithy.api#documentation": "

A string map that contains the following information for the service that you specify in\n ServiceArn:

\n
    \n
  • \n

    The attributes that apply to the service.

    \n
  • \n
  • \n

    For each attribute, the applicable value.

    \n
  • \n
\n

You can specify a total of 30 attributes.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

A complex type that contains information about attributes associated with a specific service.

" + } + }, + "com.amazonaws.servicediscovery#ServiceAttributesLimitExceededException": { + "type": "structure", + "members": { + "Message": { + "target": "com.amazonaws.servicediscovery#ErrorMessage" + } + }, + "traits": { + "smithy.api#documentation": "

The attribute can't be added to the service because you've exceeded the quota for the number of attributes you can add to a service.

", + "smithy.api#error": "client", + "smithy.api#httpError": 400 + } + }, + "com.amazonaws.servicediscovery#ServiceAttributesMap": { + "type": "map", + "key": { + "target": "com.amazonaws.servicediscovery#ServiceAttributeKey" + }, + "value": { + "target": "com.amazonaws.servicediscovery#ServiceAttributeValue" + }, + "traits": { + "smithy.api#length": { + "min": 1, + "max": 30 + } + } + }, "com.amazonaws.servicediscovery#ServiceChange": { "type": "structure", "members": { @@ -5330,6 +5546,71 @@ ] } }, + "com.amazonaws.servicediscovery#UpdateServiceAttributes": { + "type": "operation", + "input": { + "target": "com.amazonaws.servicediscovery#UpdateServiceAttributesRequest" + }, + "output": { + "target": "com.amazonaws.servicediscovery#UpdateServiceAttributesResponse" + }, + "errors": [ + { + "target": "com.amazonaws.servicediscovery#InvalidInput" + }, + { + "target": "com.amazonaws.servicediscovery#ServiceAttributesLimitExceededException" + }, + { + "target": "com.amazonaws.servicediscovery#ServiceNotFound" + } + ], + "traits": { + "smithy.api#documentation": "

Submits a request to update a specified service to add service-level attributes.

", + "smithy.api#examples": [ + { + "title": "UpdateServiceAttributes Example", + "documentation": "This example submits a request to update the specified service to add a port attribute with the value 80.", + "input": { + "ServiceId": "srv-e4anhexample0004", + "Attributes": { + "port": "80" + } + }, + "output": {} + } + ] + } + }, + "com.amazonaws.servicediscovery#UpdateServiceAttributesRequest": { + "type": "structure", + "members": { + "ServiceId": { + "target": "com.amazonaws.servicediscovery#ResourceId", + "traits": { + "smithy.api#documentation": "

The ID of the service that you want to update.

", + "smithy.api#required": {} + } + }, + "Attributes": { + "target": "com.amazonaws.servicediscovery#ServiceAttributesMap", + "traits": { + "smithy.api#documentation": "

A string map that contains attribute key-value pairs.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.servicediscovery#UpdateServiceAttributesResponse": { + "type": "structure", + "members": {}, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.servicediscovery#UpdateServiceRequest": { "type": "structure", "members": { @@ -5343,7 +5624,7 @@ "Service": { "target": "com.amazonaws.servicediscovery#ServiceChange", "traits": { - "smithy.api#documentation": "

A complex type that contains the new settings for the service.

", + "smithy.api#documentation": "

A complex type that contains the new settings for the service. You can specify a maximum of 30 attributes (key-value pairs).

", "smithy.api#required": {} } } diff --git a/models/sesv2.json b/models/sesv2.json index ee582febf8..b0dd599785 100644 --- a/models/sesv2.json +++ b/models/sesv2.json @@ -1893,6 +1893,87 @@ "smithy.api#output": {} } }, + "com.amazonaws.sesv2#CreateMultiRegionEndpoint": { + "type": "operation", + "input": { + "target": "com.amazonaws.sesv2#CreateMultiRegionEndpointRequest" + }, + "output": { + "target": "com.amazonaws.sesv2#CreateMultiRegionEndpointResponse" + }, + "errors": [ + { + "target": "com.amazonaws.sesv2#AlreadyExistsException" + }, + { + "target": "com.amazonaws.sesv2#BadRequestException" + }, + { + "target": "com.amazonaws.sesv2#LimitExceededException" + }, + { + "target": "com.amazonaws.sesv2#TooManyRequestsException" + } + ], + "traits": { + "smithy.api#documentation": "

Creates a multi-region endpoint (global-endpoint).

\n

The primary region is going to be the AWS-Region where the operation is executed.\n The secondary region has to be provided in request's parameters.\n From the data flow standpoint there is no difference between primary\n and secondary regions - sending traffic will be split equally between the two.\n The primary region is the region where the resource has been created and where it can be managed.\n

", + "smithy.api#http": { + "method": "POST", + "uri": "/v2/email/multi-region-endpoints", + "code": 200 + } + } + }, + "com.amazonaws.sesv2#CreateMultiRegionEndpointRequest": { + "type": "structure", + "members": { + "EndpointName": { + "target": "com.amazonaws.sesv2#EndpointName", + "traits": { + "smithy.api#documentation": "

The name of the multi-region endpoint (global-endpoint).

", + "smithy.api#required": {} + } + }, + "Details": { + "target": "com.amazonaws.sesv2#Details", + "traits": { + "smithy.api#documentation": "

Contains details of a multi-region endpoint (global-endpoint) being created.

", + "smithy.api#required": {} + } + }, + "Tags": { + "target": "com.amazonaws.sesv2#TagList", + "traits": { + "smithy.api#documentation": "

An array of objects that define the tags (keys and values) to associate with the multi-region endpoint (global-endpoint).

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Represents a request to create a multi-region endpoint (global-endpoint).

", + "smithy.api#input": {} + } + }, + "com.amazonaws.sesv2#CreateMultiRegionEndpointResponse": { + "type": "structure", + "members": { + "Status": { + "target": "com.amazonaws.sesv2#Status", + "traits": { + "smithy.api#documentation": "

A status of the multi-region endpoint (global-endpoint) right after the create request.

\n
    \n
  • \n

    \n CREATING – The resource is being provisioned.

    \n
  • \n
  • \n

    \n READY – The resource is ready to use.

    \n
  • \n
  • \n

    \n FAILED – The resource failed to be provisioned.

    \n
  • \n
  • \n

    \n DELETING – The resource is being deleted as requested.

    \n
  • \n
" + } + }, + "EndpointId": { + "target": "com.amazonaws.sesv2#EndpointId", + "traits": { + "smithy.api#documentation": "

The ID of the multi-region endpoint (global-endpoint).

" + } + } + }, + "traits": { + "smithy.api#documentation": "

An HTTP 200 response if the request succeeds, or an error message if the request\n fails.

", + "smithy.api#output": {} + } + }, "com.amazonaws.sesv2#CustomRedirectDomain": { "type": "string", "traits": { @@ -2607,6 +2688,69 @@ "smithy.api#output": {} } }, + "com.amazonaws.sesv2#DeleteMultiRegionEndpoint": { + "type": "operation", + "input": { + "target": "com.amazonaws.sesv2#DeleteMultiRegionEndpointRequest" + }, + "output": { + "target": "com.amazonaws.sesv2#DeleteMultiRegionEndpointResponse" + }, + "errors": [ + { + "target": "com.amazonaws.sesv2#BadRequestException" + }, + { + "target": "com.amazonaws.sesv2#ConcurrentModificationException" + }, + { + "target": "com.amazonaws.sesv2#NotFoundException" + }, + { + "target": "com.amazonaws.sesv2#TooManyRequestsException" + } + ], + "traits": { + "smithy.api#documentation": "

Deletes a multi-region endpoint (global-endpoint).

\n

Only multi-region endpoints (global-endpoints) whose primary region is the AWS-Region\n where operation is executed can be deleted.

", + "smithy.api#http": { + "method": "DELETE", + "uri": "/v2/email/multi-region-endpoints/{EndpointName}", + "code": 200 + } + } + }, + "com.amazonaws.sesv2#DeleteMultiRegionEndpointRequest": { + "type": "structure", + "members": { + "EndpointName": { + "target": "com.amazonaws.sesv2#EndpointName", + "traits": { + "smithy.api#documentation": "

The name of the multi-region endpoint (global-endpoint) to be deleted.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

Represents a request to delete a multi-region endpoint (global-endpoint).

", + "smithy.api#input": {} + } + }, + "com.amazonaws.sesv2#DeleteMultiRegionEndpointResponse": { + "type": "structure", + "members": { + "Status": { + "target": "com.amazonaws.sesv2#Status", + "traits": { + "smithy.api#documentation": "

A status of the multi-region endpoint (global-endpoint) right after the delete request.

\n
    \n
  • \n

    \n CREATING – The resource is being provisioned.

    \n
  • \n
  • \n

    \n READY – The resource is ready to use.

    \n
  • \n
  • \n

    \n FAILED – The resource failed to be provisioned.

    \n
  • \n
  • \n

    \n DELETING – The resource is being deleted as requested.

    \n
  • \n
" + } + } + }, + "traits": { + "smithy.api#documentation": "

An HTTP 200 response if the request succeeds, or an error message if the request\n fails.

", + "smithy.api#output": {} + } + }, "com.amazonaws.sesv2#DeleteSuppressedDestination": { "type": "operation", "input": { @@ -2861,6 +3005,21 @@ "smithy.api#documentation": "

An object that describes the recipients for an email.

\n \n

Amazon SES does not support the SMTPUTF8 extension, as described in RFC6531. For this reason, the\n local part of a destination email address (the part of the\n email address that precedes the @ sign) may only contain 7-bit ASCII\n characters. If the domain part of an address (the\n part after the @ sign) contains non-ASCII characters, they must be encoded using\n Punycode, as described in RFC3492.

\n
" } }, + "com.amazonaws.sesv2#Details": { + "type": "structure", + "members": { + "RoutesDetails": { + "target": "com.amazonaws.sesv2#RoutesDetails", + "traits": { + "smithy.api#documentation": "

A list of route configuration details. Must contain exactly one route configuration.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

An object that contains configuration details of multi-region endpoint (global-endpoint).

" + } + }, "com.amazonaws.sesv2#DiagnosticCode": { "type": "string" }, @@ -2939,7 +3098,7 @@ "SigningAttributesOrigin": { "target": "com.amazonaws.sesv2#DkimSigningAttributesOrigin", "traits": { - "smithy.api#documentation": "

A string that indicates how DKIM was configured for the identity. These are the\n possible values:

\n
    \n
  • \n

    \n AWS_SES – Indicates that DKIM was configured for the\n identity by using Easy DKIM.

    \n
  • \n
  • \n

    \n EXTERNAL – Indicates that DKIM was configured for the\n identity by using Bring Your Own DKIM (BYODKIM).

    \n
  • \n
" + "smithy.api#documentation": "

A string that indicates how DKIM was configured for the identity. These are the\n possible values:

\n
    \n
  • \n

    \n AWS_SES – Indicates that DKIM was configured for the\n identity by using Easy DKIM.

    \n
  • \n
  • \n

    \n EXTERNAL – Indicates that DKIM was configured for the\n identity by using Bring Your Own DKIM (BYODKIM).

    \n
  • \n
  • \n

    \n AWS_SES_AF_SOUTH_1 – Indicates that DKIM was configured for the identity by\n replicating signing attributes from a parent identity in Africa (Cape Town) region using Deterministic Easy-DKIM\n (DEED).\n

    \n
  • \n
  • \n

    \n AWS_SES_EU_NORTH_1 – Indicates that DKIM was configured for the identity by\n replicating signing attributes from a parent identity in Europe (Stockholm) region using Deterministic Easy-DKIM\n (DEED).\n

    \n
  • \n
  • \n

    \n AWS_SES_AP_SOUTH_1 – Indicates that DKIM was configured for the identity by\n replicating signing attributes from a parent identity in Asia Pacific (Mumbai) region using Deterministic Easy-DKIM\n (DEED).\n

    \n
  • \n
  • \n

    \n AWS_SES_EU_WEST_3 – Indicates that DKIM was configured for the identity by\n replicating signing attributes from a parent identity in Europe (Paris) region using Deterministic Easy-DKIM (DEED).\n

    \n
  • \n
  • \n

    \n AWS_SES_EU_WEST_2 – Indicates that DKIM was configured for the identity by\n replicating signing attributes from a parent identity in Europe (London) region using Deterministic Easy-DKIM (DEED).\n

    \n
  • \n
  • \n

    \n AWS_SES_EU_SOUTH_1 – Indicates that DKIM was configured for the identity by\n replicating signing attributes from a parent identity in Europe (Milan) region using Deterministic Easy-DKIM (DEED).\n

    \n
  • \n
  • \n

    \n AWS_SES_EU_WEST_1 – Indicates that DKIM was configured for the identity by\n replicating signing attributes from a parent identity in Europe (Ireland) region using Deterministic Easy-DKIM (DEED).\n

    \n
  • \n
  • \n

    \n AWS_SES_AP_NORTHEAST_3 – Indicates that DKIM was configured for the identity by\n replicating signing attributes from a parent identity in Asia Pacific (Osaka) region using Deterministic Easy-DKIM\n (DEED).\n

    \n
  • \n
  • \n

    \n AWS_SES_AP_NORTHEAST_2 – Indicates that DKIM was configured for the identity by\n replicating signing attributes from a parent identity in Asia Pacific (Seoul) region using Deterministic Easy-DKIM\n (DEED).\n

    \n
  • \n
  • \n

    \n AWS_SES_ME_SOUTH_1 – Indicates that DKIM was configured for the identity by\n replicating signing attributes from a parent identity in Middle East (Bahrain) region using Deterministic Easy-DKIM\n (DEED).\n

    \n
  • \n
  • \n

    \n AWS_SES_AP_NORTHEAST_1 – Indicates that DKIM was configured for the identity by\n replicating signing attributes from a parent identity in Asia Pacific (Tokyo) region using Deterministic Easy-DKIM\n (DEED).\n

    \n
  • \n
  • \n

    \n AWS_SES_IL_CENTRAL_1 – Indicates that DKIM was configured for the identity by\n replicating signing attributes from a parent identity in Israel (Tel Aviv) region using Deterministic Easy-DKIM (DEED).\n

    \n
  • \n
  • \n

    \n AWS_SES_SA_EAST_1 – Indicates that DKIM was configured for the identity by\n replicating signing attributes from a parent identity in South America (São Paulo) region using Deterministic Easy-DKIM\n (DEED).\n

    \n
  • \n
  • \n

    \n AWS_SES_CA_CENTRAL_1 – Indicates that DKIM was configured for the identity by\n replicating signing attributes from a parent identity in Canada (Central) region using Deterministic Easy-DKIM (DEED).\n

    \n
  • \n
  • \n

    \n AWS_SES_AP_SOUTHEAST_1 – Indicates that DKIM was configured for the identity by\n replicating signing attributes from a parent identity in Asia Pacific (Singapore) region using Deterministic Easy-DKIM\n (DEED).\n

    \n
  • \n
  • \n

    \n AWS_SES_AP_SOUTHEAST_2 – Indicates that DKIM was configured for the identity by\n replicating signing attributes from a parent identity in Asia Pacific (Sydney) region using Deterministic Easy-DKIM\n (DEED).\n

    \n
  • \n
  • \n

    \n AWS_SES_AP_SOUTHEAST_3 – Indicates that DKIM was configured for the identity by\n replicating signing attributes from a parent identity in Asia Pacific (Jakarta) region using Deterministic Easy-DKIM\n (DEED).\n

    \n
  • \n
  • \n

    \n AWS_SES_EU_CENTRAL_1 – Indicates that DKIM was configured for the identity by\n replicating signing attributes from a parent identity in Europe (Frankfurt) region using Deterministic Easy-DKIM\n (DEED).\n

    \n
  • \n
  • \n

    \n AWS_SES_US_EAST_1 – Indicates that DKIM was configured for the identity by\n replicating signing attributes from a parent identity in US East (N. Virginia) region using Deterministic Easy-DKIM\n (DEED).\n

    \n
  • \n
  • \n

    \n AWS_SES_US_EAST_2 – Indicates that DKIM was configured for the identity by\n replicating signing attributes from a parent identity in US East (Ohio) region using Deterministic Easy-DKIM (DEED).\n

    \n
  • \n
  • \n

    \n AWS_SES_US_WEST_1 – Indicates that DKIM was configured for the identity by\n replicating signing attributes from a parent identity in US West (N. California) region using Deterministic Easy-DKIM\n (DEED).\n

    \n
  • \n
  • \n

    \n AWS_SES_US_WEST_2 – Indicates that DKIM was configured for the identity by\n replicating signing attributes from a parent identity in US West (Oregon) region using Deterministic Easy-DKIM (DEED).\n

    \n
  • \n
" } }, "NextSigningKeyLength": { @@ -2985,6 +3144,12 @@ "traits": { "smithy.api#documentation": "

[Easy DKIM] The key length of the future DKIM key pair to be generated. This can be\n changed at most once per day.

" } + }, + "DomainSigningAttributesOrigin": { + "target": "com.amazonaws.sesv2#DkimSigningAttributesOrigin", + "traits": { + "smithy.api#documentation": "

The attribute to use for configuring DKIM for the identity depends on the\n operation:\n

\n
    \n
  1. \n

    For PutEmailIdentityDkimSigningAttributes:\n

    \n \n
  2. \n
  3. \n

    For CreateEmailIdentity when replicating a parent identity's DKIM\n configuration:\n

    \n
      \n
    • \n

      Allowed values: All values except AWS_SES and\n EXTERNAL\n

      \n
    • \n
    \n
  4. \n
\n
    \n
  • \n

    \n AWS_SES – Configure DKIM for the identity by using Easy DKIM.\n

    \n
  • \n
  • \n

    \n EXTERNAL – Configure DKIM for the identity by using Bring Your Own DKIM\n (BYODKIM).\n

    \n
  • \n
  • \n

    \n AWS_SES_AF_SOUTH_1 – Configure DKIM for the identity by replicating from a parent\n identity in Africa (Cape Town) region using Deterministic Easy-DKIM (DEED).\n

    \n
  • \n
  • \n

    \n AWS_SES_EU_NORTH_1 – Configure DKIM for the identity by replicating from a parent\n identity in Europe (Stockholm) region using Deterministic Easy-DKIM (DEED).\n

    \n
  • \n
  • \n

    \n AWS_SES_AP_SOUTH_1 – Configure DKIM for the identity by replicating from a parent\n identity in Asia Pacific (Mumbai) region using Deterministic Easy-DKIM (DEED).\n

    \n
  • \n
  • \n

    \n AWS_SES_EU_WEST_3 – Configure DKIM for the identity by replicating from a parent\n identity in Europe (Paris) region using Deterministic Easy-DKIM (DEED).\n

    \n
  • \n
  • \n

    \n AWS_SES_EU_WEST_2 – Configure DKIM for the identity by replicating from a parent\n identity in Europe (London) region using Deterministic Easy-DKIM (DEED).\n

    \n
  • \n
  • \n

    \n AWS_SES_EU_SOUTH_1 – Configure DKIM for the identity by replicating from a parent\n identity in Europe (Milan) region using Deterministic Easy-DKIM (DEED).\n

    \n
  • \n
  • \n

    \n AWS_SES_EU_WEST_1 – Configure DKIM for the identity by replicating from a parent\n identity in Europe (Ireland) region using Deterministic Easy-DKIM (DEED).\n

    \n
  • \n
  • \n

    \n AWS_SES_AP_NORTHEAST_3 – Configure DKIM for the identity by replicating from a\n parent identity in Asia Pacific (Osaka) region using Deterministic Easy-DKIM (DEED).\n

    \n
  • \n
  • \n

    \n AWS_SES_AP_NORTHEAST_2 – Configure DKIM for the identity by replicating from a\n parent identity in Asia Pacific (Seoul) region using Deterministic Easy-DKIM (DEED).\n

    \n
  • \n
  • \n

    \n AWS_SES_ME_SOUTH_1 – Configure DKIM for the identity by replicating from a parent\n identity in Middle East (Bahrain) region using Deterministic Easy-DKIM (DEED).\n

    \n
  • \n
  • \n

    \n AWS_SES_AP_NORTHEAST_1 – Configure DKIM for the identity by replicating from a\n parent identity in Asia Pacific (Tokyo) region using Deterministic Easy-DKIM (DEED).\n

    \n
  • \n
  • \n

    \n AWS_SES_IL_CENTRAL_1 – Configure DKIM for the identity by replicating from a\n parent identity in Israel (Tel Aviv) region using Deterministic Easy-DKIM (DEED).\n

    \n
  • \n
  • \n

    \n AWS_SES_SA_EAST_1 – Configure DKIM for the identity by replicating from a parent\n identity in South America (São Paulo) region using Deterministic Easy-DKIM (DEED).\n

    \n
  • \n
  • \n

    \n AWS_SES_CA_CENTRAL_1 – Configure DKIM for the identity by replicating from a\n parent identity in Canada (Central) region using Deterministic Easy-DKIM (DEED).\n

    \n
  • \n
  • \n

    \n AWS_SES_AP_SOUTHEAST_1 – Configure DKIM for the identity by replicating from a\n parent identity in Asia Pacific (Singapore) region using Deterministic Easy-DKIM (DEED).\n

    \n
  • \n
  • \n

    \n AWS_SES_AP_SOUTHEAST_2 – Configure DKIM for the identity by replicating from a\n parent identity in Asia Pacific (Sydney) region using Deterministic Easy-DKIM (DEED).\n

    \n
  • \n
  • \n

    \n AWS_SES_AP_SOUTHEAST_3 – Configure DKIM for the identity by replicating from a\n parent identity in Asia Pacific (Jakarta) region using Deterministic Easy-DKIM (DEED).\n

    \n
  • \n
  • \n

    \n AWS_SES_EU_CENTRAL_1 – Configure DKIM for the identity by replicating from a\n parent identity in Europe (Frankfurt) region using Deterministic Easy-DKIM (DEED).\n

    \n
  • \n
  • \n

    \n AWS_SES_US_EAST_1 – Configure DKIM for the identity by replicating from a parent\n identity in US East (N. Virginia) region using Deterministic Easy-DKIM (DEED).\n

    \n
  • \n
  • \n

    \n AWS_SES_US_EAST_2 – Configure DKIM for the identity by replicating from a parent\n identity in US East (Ohio) region using Deterministic Easy-DKIM (DEED).\n

    \n
  • \n
  • \n

    \n AWS_SES_US_WEST_1 – Configure DKIM for the identity by replicating from a parent\n identity in US West (N. California) region using Deterministic Easy-DKIM (DEED).\n

    \n
  • \n
  • \n

    \n AWS_SES_US_WEST_2 – Configure DKIM for the identity by replicating from a parent\n identity in US West (Oregon) region using Deterministic Easy-DKIM (DEED).\n

    \n
  • \n
" + } } }, "traits": { @@ -3005,6 +3170,138 @@ "traits": { "smithy.api#enumValue": "EXTERNAL" } + }, + "AWS_SES_AF_SOUTH_1": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "AWS_SES_AF_SOUTH_1" + } + }, + "AWS_SES_EU_NORTH_1": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "AWS_SES_EU_NORTH_1" + } + }, + "AWS_SES_AP_SOUTH_1": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "AWS_SES_AP_SOUTH_1" + } + }, + "AWS_SES_EU_WEST_3": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "AWS_SES_EU_WEST_3" + } + }, + "AWS_SES_EU_WEST_2": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "AWS_SES_EU_WEST_2" + } + }, + "AWS_SES_EU_SOUTH_1": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "AWS_SES_EU_SOUTH_1" + } + }, + "AWS_SES_EU_WEST_1": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "AWS_SES_EU_WEST_1" + } + }, + "AWS_SES_AP_NORTHEAST_3": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "AWS_SES_AP_NORTHEAST_3" + } + }, + "AWS_SES_AP_NORTHEAST_2": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "AWS_SES_AP_NORTHEAST_2" + } + }, + "AWS_SES_ME_SOUTH_1": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "AWS_SES_ME_SOUTH_1" + } + }, + "AWS_SES_AP_NORTHEAST_1": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "AWS_SES_AP_NORTHEAST_1" + } + }, + "AWS_SES_IL_CENTRAL_1": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "AWS_SES_IL_CENTRAL_1" + } + }, + "AWS_SES_SA_EAST_1": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "AWS_SES_SA_EAST_1" + } + }, + "AWS_SES_CA_CENTRAL_1": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "AWS_SES_CA_CENTRAL_1" + } + }, + "AWS_SES_AP_SOUTHEAST_1": { + "target": "smithy.api#Unit", + "traits": { + 
"smithy.api#enumValue": "AWS_SES_AP_SOUTHEAST_1" + } + }, + "AWS_SES_AP_SOUTHEAST_2": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "AWS_SES_AP_SOUTHEAST_2" + } + }, + "AWS_SES_AP_SOUTHEAST_3": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "AWS_SES_AP_SOUTHEAST_3" + } + }, + "AWS_SES_EU_CENTRAL_1": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "AWS_SES_EU_CENTRAL_1" + } + }, + "AWS_SES_US_EAST_1": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "AWS_SES_US_EAST_1" + } + }, + "AWS_SES_US_EAST_2": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "AWS_SES_US_EAST_2" + } + }, + "AWS_SES_US_WEST_1": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "AWS_SES_US_WEST_1" + } + }, + "AWS_SES_US_WEST_2": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "AWS_SES_US_WEST_2" + } } } }, @@ -3457,6 +3754,23 @@ "com.amazonaws.sesv2#EnabledWrapper": { "type": "boolean" }, + "com.amazonaws.sesv2#EndpointId": { + "type": "string", + "traits": { + "smithy.api#documentation": "

The ID of the multi-region endpoint (global-endpoint).

" + } + }, + "com.amazonaws.sesv2#EndpointName": { + "type": "string", + "traits": { + "smithy.api#documentation": "

The name of the multi-region endpoint (global-endpoint).

", + "smithy.api#length": { + "min": 1, + "max": 64 + }, + "smithy.api#pattern": "^[\\w\\-_]+$" + } + }, "com.amazonaws.sesv2#EngagementEventType": { "type": "enum", "members": { @@ -5613,13 +5927,13 @@ "smithy.api#output": {} } }, - "com.amazonaws.sesv2#GetSuppressedDestination": { + "com.amazonaws.sesv2#GetMultiRegionEndpoint": { "type": "operation", "input": { - "target": "com.amazonaws.sesv2#GetSuppressedDestinationRequest" + "target": "com.amazonaws.sesv2#GetMultiRegionEndpointRequest" }, "output": { - "target": "com.amazonaws.sesv2#GetSuppressedDestinationResponse" + "target": "com.amazonaws.sesv2#GetMultiRegionEndpointResponse" }, "errors": [ { @@ -5633,56 +5947,146 @@ } ], "traits": { - "smithy.api#documentation": "

Retrieves information about a specific email address that's on the suppression list\n for your account.

", + "smithy.api#documentation": "

Displays the multi-region endpoint (global-endpoint) configuration.

\n

Only multi-region endpoints (global-endpoints) whose primary region is the AWS-Region\n where the operation is executed can be displayed.

", "smithy.api#http": { "method": "GET", - "uri": "/v2/email/suppression/addresses/{EmailAddress}", + "uri": "/v2/email/multi-region-endpoints/{EndpointName}", "code": 200 } } }, - "com.amazonaws.sesv2#GetSuppressedDestinationRequest": { + "com.amazonaws.sesv2#GetMultiRegionEndpointRequest": { "type": "structure", "members": { - "EmailAddress": { - "target": "com.amazonaws.sesv2#EmailAddress", + "EndpointName": { + "target": "com.amazonaws.sesv2#EndpointName", "traits": { - "smithy.api#documentation": "

The email address that's on the account suppression list.

", + "smithy.api#documentation": "

The name of the multi-region endpoint (global-endpoint).

", "smithy.api#httpLabel": {}, "smithy.api#required": {} } } }, "traits": { - "smithy.api#documentation": "

A request to retrieve information about an email address that's on the suppression\n list for your account.

", + "smithy.api#documentation": "

Represents a request to display the multi-region endpoint (global-endpoint).

", "smithy.api#input": {} } }, - "com.amazonaws.sesv2#GetSuppressedDestinationResponse": { + "com.amazonaws.sesv2#GetMultiRegionEndpointResponse": { "type": "structure", "members": { - "SuppressedDestination": { - "target": "com.amazonaws.sesv2#SuppressedDestination", + "EndpointName": { + "target": "com.amazonaws.sesv2#EndpointName", "traits": { - "smithy.api#documentation": "

An object containing information about the suppressed email address.

", - "smithy.api#required": {} + "smithy.api#documentation": "

The name of the multi-region endpoint (global-endpoint).

" } - } - }, - "traits": { - "smithy.api#documentation": "

Information about the suppressed email address.

", - "smithy.api#output": {} - } - }, - "com.amazonaws.sesv2#GuardianAttributes": { - "type": "structure", - "members": { - "OptimizedSharedDelivery": { - "target": "com.amazonaws.sesv2#FeatureStatus", + }, + "EndpointId": { + "target": "com.amazonaws.sesv2#EndpointId", "traits": { - "smithy.api#documentation": "

Specifies the status of your VDM optimized shared delivery. Can be one of the\n following:

\n
    \n
  • \n

    \n ENABLED – Amazon SES enables optimized shared delivery for your\n account.

    \n
  • \n
  • \n

    \n DISABLED – Amazon SES disables optimized shared delivery for\n your account.

    \n
  • \n
" + "smithy.api#documentation": "

The ID of the multi-region endpoint (global-endpoint).

" } - } + }, + "Routes": { + "target": "com.amazonaws.sesv2#Routes", + "traits": { + "smithy.api#documentation": "

Contains routes information for the multi-region endpoint (global-endpoint).

" + } + }, + "Status": { + "target": "com.amazonaws.sesv2#Status", + "traits": { + "smithy.api#documentation": "

The status of the multi-region endpoint (global-endpoint).

\n
    \n
  • \n

    \n CREATING – The resource is being provisioned.

    \n
  • \n
  • \n

    \n READY – The resource is ready to use.

    \n
  • \n
  • \n

    \n FAILED – The resource failed to be provisioned.

    \n
  • \n
  • \n

    \n DELETING – The resource is being deleted as requested.

    \n
  • \n
" + } + }, + "CreatedTimestamp": { + "target": "com.amazonaws.sesv2#Timestamp", + "traits": { + "smithy.api#documentation": "

The time stamp of when the multi-region endpoint (global-endpoint) was created.

" + } + }, + "LastUpdatedTimestamp": { + "target": "com.amazonaws.sesv2#Timestamp", + "traits": { + "smithy.api#documentation": "

The time stamp of when the multi-region endpoint (global-endpoint) was last updated.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

An HTTP 200 response if the request succeeds, or an error message if the request\n fails.

", + "smithy.api#output": {} + } + }, + "com.amazonaws.sesv2#GetSuppressedDestination": { + "type": "operation", + "input": { + "target": "com.amazonaws.sesv2#GetSuppressedDestinationRequest" + }, + "output": { + "target": "com.amazonaws.sesv2#GetSuppressedDestinationResponse" + }, + "errors": [ + { + "target": "com.amazonaws.sesv2#BadRequestException" + }, + { + "target": "com.amazonaws.sesv2#NotFoundException" + }, + { + "target": "com.amazonaws.sesv2#TooManyRequestsException" + } + ], + "traits": { + "smithy.api#documentation": "

Retrieves information about a specific email address that's on the suppression list\n for your account.

", + "smithy.api#http": { + "method": "GET", + "uri": "/v2/email/suppression/addresses/{EmailAddress}", + "code": 200 + } + } + }, + "com.amazonaws.sesv2#GetSuppressedDestinationRequest": { + "type": "structure", + "members": { + "EmailAddress": { + "target": "com.amazonaws.sesv2#EmailAddress", + "traits": { + "smithy.api#documentation": "

The email address that's on the account suppression list.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

A request to retrieve information about an email address that's on the suppression\n list for your account.

", + "smithy.api#input": {} + } + }, + "com.amazonaws.sesv2#GetSuppressedDestinationResponse": { + "type": "structure", + "members": { + "SuppressedDestination": { + "target": "com.amazonaws.sesv2#SuppressedDestination", + "traits": { + "smithy.api#documentation": "

An object containing information about the suppressed email address.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

Information about the suppressed email address.

", + "smithy.api#output": {} + } + }, + "com.amazonaws.sesv2#GuardianAttributes": { + "type": "structure", + "members": { + "OptimizedSharedDelivery": { + "target": "com.amazonaws.sesv2#FeatureStatus", + "traits": { + "smithy.api#documentation": "

Specifies the status of your VDM optimized shared delivery. Can be one of the\n following:

\n
    \n
  • \n

    \n ENABLED – Amazon SES enables optimized shared delivery for your\n account.

    \n
  • \n
  • \n

    \n DISABLED – Amazon SES disables optimized shared delivery for\n your account.

    \n
  • \n
" + } + } }, "traits": { "smithy.api#documentation": "

An object containing additional settings for your VDM configuration as applicable to\n the Guardian.

" @@ -7085,6 +7489,81 @@ "smithy.api#documentation": "

An object used to specify a list or topic to which an email belongs, which will be\n used when a contact chooses to unsubscribe.

" } }, + "com.amazonaws.sesv2#ListMultiRegionEndpoints": { + "type": "operation", + "input": { + "target": "com.amazonaws.sesv2#ListMultiRegionEndpointsRequest" + }, + "output": { + "target": "com.amazonaws.sesv2#ListMultiRegionEndpointsResponse" + }, + "errors": [ + { + "target": "com.amazonaws.sesv2#BadRequestException" + }, + { + "target": "com.amazonaws.sesv2#TooManyRequestsException" + } + ], + "traits": { + "smithy.api#documentation": "

List the multi-region endpoints (global-endpoints).

\n

Only multi-region endpoints (global-endpoints) whose primary region is the AWS-Region\n where the operation is executed will be listed.

", + "smithy.api#http": { + "method": "GET", + "uri": "/v2/email/multi-region-endpoints", + "code": 200 + }, + "smithy.api#paginated": { + "inputToken": "NextToken", + "outputToken": "NextToken", + "items": "MultiRegionEndpoints", + "pageSize": "PageSize" + } + } + }, + "com.amazonaws.sesv2#ListMultiRegionEndpointsRequest": { + "type": "structure", + "members": { + "NextToken": { + "target": "com.amazonaws.sesv2#NextTokenV2", + "traits": { + "smithy.api#documentation": "

A token returned from a previous call to ListMultiRegionEndpoints to indicate\n the position in the list of multi-region endpoints (global-endpoints).

", + "smithy.api#httpQuery": "NextToken" + } + }, + "PageSize": { + "target": "com.amazonaws.sesv2#PageSizeV2", + "traits": { + "smithy.api#documentation": "

The number of results to show in a single call to ListMultiRegionEndpoints.\n If the number of results is larger than the number you specified in this parameter,\n the response includes a NextToken element\n that you can use to retrieve the next page of results.\n

", + "smithy.api#httpQuery": "PageSize" + } + } + }, + "traits": { + "smithy.api#documentation": "

Represents a request to list all the multi-region endpoints (global-endpoints)\n whose primary region is the AWS-Region where the operation is executed.\n 

", + "smithy.api#input": {} + } + }, + "com.amazonaws.sesv2#ListMultiRegionEndpointsResponse": { + "type": "structure", + "members": { + "MultiRegionEndpoints": { + "target": "com.amazonaws.sesv2#MultiRegionEndpoints", + "traits": { + "smithy.api#documentation": "

An array that contains key multi-region endpoint (global-endpoint) properties.

" + } + }, + "NextToken": { + "target": "com.amazonaws.sesv2#NextTokenV2", + "traits": { + "smithy.api#documentation": "

A token indicating that there are additional multi-region endpoints (global-endpoints) available to be listed.\n Pass this token to a subsequent ListMultiRegionEndpoints call to retrieve the\n next page.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The following elements are returned by the service.

", + "smithy.api#output": {} + } + }, "com.amazonaws.sesv2#ListOfContactLists": { "type": "list", "member": { @@ -8009,9 +8488,69 @@ "smithy.api#documentation": "

An object that contains details about the data source for the metrics export.

" } }, + "com.amazonaws.sesv2#MultiRegionEndpoint": { + "type": "structure", + "members": { + "EndpointName": { + "target": "com.amazonaws.sesv2#EndpointName", + "traits": { + "smithy.api#documentation": "

The name of the multi-region endpoint (global-endpoint).

" + } + }, + "Status": { + "target": "com.amazonaws.sesv2#Status", + "traits": { + "smithy.api#documentation": "

The status of the multi-region endpoint (global-endpoint).

\n
    \n
  • \n

    \n CREATING – The resource is being provisioned.

    \n
  • \n
  • \n

    \n READY – The resource is ready to use.

    \n
  • \n
  • \n

    \n FAILED – The resource failed to be provisioned.

    \n
  • \n
  • \n

    \n DELETING – The resource is being deleted as requested.

    \n
  • \n
" + } + }, + "EndpointId": { + "target": "com.amazonaws.sesv2#EndpointId", + "traits": { + "smithy.api#documentation": "

The ID of the multi-region endpoint (global-endpoint).

" + } + }, + "Regions": { + "target": "com.amazonaws.sesv2#Regions", + "traits": { + "smithy.api#documentation": "

Primary and secondary regions between which multi-region endpoint splits sending traffic.

" + } + }, + "CreatedTimestamp": { + "target": "com.amazonaws.sesv2#Timestamp", + "traits": { + "smithy.api#documentation": "

The time stamp of when the multi-region endpoint (global-endpoint) was created.

" + } + }, + "LastUpdatedTimestamp": { + "target": "com.amazonaws.sesv2#Timestamp", + "traits": { + "smithy.api#documentation": "

The time stamp of when the multi-region endpoint (global-endpoint) was last updated.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

An object that contains multi-region endpoint (global-endpoint) properties.

" + } + }, + "com.amazonaws.sesv2#MultiRegionEndpoints": { + "type": "list", + "member": { + "target": "com.amazonaws.sesv2#MultiRegionEndpoint" + } + }, "com.amazonaws.sesv2#NextToken": { "type": "string" }, + "com.amazonaws.sesv2#NextTokenV2": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 5000 + }, + "smithy.api#pattern": "^^([A-Za-z0-9+/]{4})*([A-Za-z0-9+/]{2}==|[A-Za-z0-9+/]{3}=)?$" + } + }, "com.amazonaws.sesv2#NotFoundException": { "type": "structure", "members": { @@ -8054,6 +8593,15 @@ "smithy.api#documentation": "

An object that contains information about email that was sent from the selected\n domain.

" } }, + "com.amazonaws.sesv2#PageSizeV2": { + "type": "integer", + "traits": { + "smithy.api#range": { + "min": 1, + "max": 1000 + } + } + }, "com.amazonaws.sesv2#Percentage": { "type": "double", "traits": { @@ -9634,6 +10182,18 @@ "target": "com.amazonaws.sesv2#Recommendation" } }, + "com.amazonaws.sesv2#Region": { + "type": "string", + "traits": { + "smithy.api#documentation": "

The name of an AWS-Region.

" + } + }, + "com.amazonaws.sesv2#Regions": { + "type": "list", + "member": { + "target": "com.amazonaws.sesv2#Region" + } + }, "com.amazonaws.sesv2#RenderedEmailTemplate": { "type": "string", "traits": { @@ -9750,6 +10310,54 @@ } } }, + "com.amazonaws.sesv2#Route": { + "type": "structure", + "members": { + "Region": { + "target": "com.amazonaws.sesv2#Region", + "traits": { + "smithy.api#documentation": "

The name of an AWS-Region.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

An object which contains an AWS-Region and routing status.

" + } + }, + "com.amazonaws.sesv2#RouteDetails": { + "type": "structure", + "members": { + "Region": { + "target": "com.amazonaws.sesv2#Region", + "traits": { + "smithy.api#documentation": "

The name of an AWS-Region to be a secondary region for the multi-region endpoint (global-endpoint).

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

An object that contains route configuration. Includes secondary region name.

" + } + }, + "com.amazonaws.sesv2#Routes": { + "type": "list", + "member": { + "target": "com.amazonaws.sesv2#Route" + }, + "traits": { + "smithy.api#documentation": "

A list of routes between which the traffic will be split when sending through the multi-region endpoint (global-endpoint).

" + } + }, + "com.amazonaws.sesv2#RoutesDetails": { + "type": "list", + "member": { + "target": "com.amazonaws.sesv2#RouteDetails" + }, + "traits": { + "smithy.api#documentation": "

A list of route configuration details. Must contain exactly one route configuration.

" + } + }, "com.amazonaws.sesv2#S3Url": { "type": "string", "traits": { @@ -9912,6 +10520,15 @@ "traits": { "smithy.api#documentation": "

The name of the configuration set to use when sending the email.

" } + }, + "EndpointId": { + "target": "com.amazonaws.sesv2#EndpointId", + "traits": { + "smithy.api#documentation": "

The ID of the multi-region endpoint (global-endpoint).

", + "smithy.rules#contextParam": { + "name": "EndpointId" + } + } } }, "traits": { @@ -10120,6 +10737,15 @@ "smithy.api#documentation": "

The name of the configuration set to use when sending the email.

" } }, + "EndpointId": { + "target": "com.amazonaws.sesv2#EndpointId", + "traits": { + "smithy.api#documentation": "

The ID of the multi-region endpoint (global-endpoint).

", + "smithy.rules#contextParam": { + "name": "EndpointId" + } + } + }, "ListManagementOptions": { "target": "com.amazonaws.sesv2#ListManagementOptions", "traits": { @@ -10268,6 +10894,9 @@ { "target": "com.amazonaws.sesv2#CreateImportJob" }, + { + "target": "com.amazonaws.sesv2#CreateMultiRegionEndpoint" + }, { "target": "com.amazonaws.sesv2#DeleteConfigurationSet" }, @@ -10295,6 +10924,9 @@ { "target": "com.amazonaws.sesv2#DeleteEmailTemplate" }, + { + "target": "com.amazonaws.sesv2#DeleteMultiRegionEndpoint" + }, { "target": "com.amazonaws.sesv2#DeleteSuppressedDestination" }, @@ -10358,6 +10990,9 @@ { "target": "com.amazonaws.sesv2#GetMessageInsights" }, + { + "target": "com.amazonaws.sesv2#GetMultiRegionEndpoint" + }, { "target": "com.amazonaws.sesv2#GetSuppressedDestination" }, @@ -10394,6 +11029,9 @@ { "target": "com.amazonaws.sesv2#ListImportJobs" }, + { + "target": "com.amazonaws.sesv2#ListMultiRegionEndpoints" + }, { "target": "com.amazonaws.sesv2#ListRecommendations" }, @@ -10545,9 +11183,199 @@ "required": false, "documentation": "Override the endpoint used to send this request", "type": "String" + }, + "EndpointId": { + "required": false, + "documentation": "Operation parameter for EndpointId", + "type": "String" } }, "rules": [ + { + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "EndpointId" + } + ] + }, + { + "fn": "isSet", + "argv": [ + { + "ref": "Region" + } + ] + }, + { + "fn": "aws.partition", + "argv": [ + { + "ref": "Region" + } + ], + "assign": "PartitionResult" + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "isValidHostLabel", + "argv": [ + { + "ref": "EndpointId" + }, + true + ] + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + false + ] + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Endpoint" + } + ] + } + ], + "endpoint": { + "url": { + "ref": "Endpoint" + }, + "properties": { + "authSchemes": [ + { + 
"name": "sigv4a", + "signingName": "ses", + "signingRegionSet": [ + "*" + ] + } + ] + }, + "headers": {} + }, + "type": "endpoint" + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://{EndpointId}.endpoints.email.{PartitionResult#dualStackDnsSuffix}", + "properties": { + "authSchemes": [ + { + "name": "sigv4a", + "signingName": "ses", + "signingRegionSet": [ + "*" + ] + } + ] + }, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" + } + ], + "type": "tree" + }, + { + "conditions": [], + "endpoint": { + "url": "https://{EndpointId}.endpoints.email.{PartitionResult#dnsSuffix}", + "properties": { + "authSchemes": [ + { + "name": "sigv4a", + "signingName": "ses", + "signingRegionSet": [ + "*" + ] + } + ] + }, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + }, + { + "conditions": [], + "error": "Invalid Configuration: FIPS is not supported with multi-region endpoints", + "type": "error" + } + ], + "type": "tree" + }, + { + "conditions": [], + "error": "EndpointId must be a valid host label", + "type": "error" + } + ], + "type": "tree" + }, { "conditions": [ { @@ -11427,6 +12255,163 @@ "expect": { "error": "Invalid Configuration: Missing Region" } + }, + { + "documentation": "Valid EndpointId with dualstack and FIPS disabled. 
i.e, IPv4 Only stack with no FIPS", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "signingName": "ses", + "name": "sigv4a", + "signingRegionSet": [ + "*" + ] + } + ] + }, + "url": "https://abc123.456def.endpoints.email.amazonaws.com" + } + }, + "params": { + "EndpointId": "abc123.456def", + "UseDualStack": false, + "UseFIPS": false, + "Region": "us-east-1" + } + }, + { + "documentation": "Valid EndpointId with dualstack enabled", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "signingName": "ses", + "name": "sigv4a", + "signingRegionSet": [ + "*" + ] + } + ] + }, + "url": "https://abc123.456def.endpoints.email.api.aws" + } + }, + "params": { + "EndpointId": "abc123.456def", + "UseDualStack": true, + "UseFIPS": false, + "Region": "us-west-2" + } + }, + { + "documentation": "Valid EndpointId with FIPS set, dualstack disabled", + "expect": { + "error": "Invalid Configuration: FIPS is not supported with multi-region endpoints" + }, + "params": { + "EndpointId": "abc123.456def", + "UseDualStack": false, + "UseFIPS": true, + "Region": "ap-northeast-1" + } + }, + { + "documentation": "Valid EndpointId with both dualstack and FIPS enabled", + "expect": { + "error": "Invalid Configuration: FIPS is not supported with multi-region endpoints" + }, + "params": { + "EndpointId": "abc123.456def", + "UseDualStack": true, + "UseFIPS": true, + "Region": "ap-northeast-2" + } + }, + { + "documentation": "Regular regional request, without EndpointId", + "expect": { + "endpoint": { + "url": "https://email.eu-west-1.amazonaws.com" + } + }, + "params": { + "UseDualStack": false, + "Region": "eu-west-1" + } + }, + { + "documentation": "Invalid EndpointId (Invalid chars / format)", + "expect": { + "error": "EndpointId must be a valid host label" + }, + "params": { + "EndpointId": "badactor.com?foo=bar", + "UseDualStack": false, + "Region": "eu-west-2" + } + }, + { + "documentation": "Invalid EndpointId (Empty)", + "expect": { + "error": 
"EndpointId must be a valid host label" + }, + "params": { + "EndpointId": "", + "UseDualStack": false, + "Region": "ap-south-1" + } + }, + { + "documentation": "Valid EndpointId with custom sdk endpoint", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "signingName": "ses", + "name": "sigv4a", + "signingRegionSet": [ + "*" + ] + } + ] + }, + "url": "https://example.com" + } + }, + "params": { + "EndpointId": "abc123.456def", + "UseDualStack": false, + "Region": "us-east-1", + "Endpoint": "https://example.com" + } + }, + { + "documentation": "Valid EndpointId with custom sdk endpoint with FIPS enabled", + "expect": { + "error": "Invalid Configuration: FIPS is not supported with multi-region endpoints" + }, + "params": { + "EndpointId": "abc123.456def", + "UseDualStack": false, + "UseFIPS": true, + "Region": "us-east-1", + "Endpoint": "https://example.com" + } + }, + { + "documentation": "Valid EndpointId with DualStack enabled and partition does not support DualStack", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "EndpointId": "abc123.456def", + "UseDualStack": true, + "Region": "us-isob-east-1" + } } ], "version": "1.0" @@ -11448,6 +12433,38 @@ "smithy.api#documentation": "

An object that defines an Amazon SNS destination for email events. You can use Amazon SNS to\n send notifications when certain email events occur.

" } }, + "com.amazonaws.sesv2#Status": { + "type": "enum", + "members": { + "CREATING": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "CREATING" + } + }, + "READY": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "READY" + } + }, + "FAILED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "FAILED" + } + }, + "DELETING": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "DELETING" + } + } + }, + "traits": { + "smithy.api#documentation": "

The status of the multi-region endpoint (global-endpoint).

\n
    \n
  • \n

    \n CREATING – The resource is being provisioned.

    \n
  • \n
  • \n

    \n READY – The resource is ready to use.

    \n
  • \n
  • \n

    \n FAILED – The resource failed to be provisioned.

    \n
  • \n
  • \n

    \n DELETING – The resource is being deleted as requested.

    \n
  • \n
" + } + }, "com.amazonaws.sesv2#Subject": { "type": "string" }, @@ -12618,6 +13635,36 @@ "traits": { "smithy.api#enumValue": "INVALID_VALUE" } + }, + "REPLICATION_ACCESS_DENIED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "REPLICATION_ACCESS_DENIED" + } + }, + "REPLICATION_PRIMARY_NOT_FOUND": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "REPLICATION_PRIMARY_NOT_FOUND" + } + }, + "REPLICATION_PRIMARY_BYO_DKIM_NOT_SUPPORTED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "REPLICATION_PRIMARY_BYO_DKIM_NOT_SUPPORTED" + } + }, + "REPLICATION_REPLICA_AS_PRIMARY_NOT_SUPPORTED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "REPLICATION_REPLICA_AS_PRIMARY_NOT_SUPPORTED" + } + }, + "REPLICATION_PRIMARY_INVALID_REGION": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "REPLICATION_PRIMARY_INVALID_REGION" + } } } }, @@ -12639,7 +13686,7 @@ "ErrorType": { "target": "com.amazonaws.sesv2#VerificationError", "traits": { - "smithy.api#documentation": "

Provides the reason for the failure describing why Amazon SES was not able to successfully\n verify the identity. Below are the possible values:

\n
    \n
  • \n

    \n INVALID_VALUE – Amazon SES was able to find the record, but the\n value contained within the record was invalid. Ensure you have published the\n correct values for the record.

    \n
  • \n
  • \n

    \n TYPE_NOT_FOUND – The queried hostname exists but does not\n have the requested type of DNS record. Ensure that you have published the\n correct type of DNS record.

    \n
  • \n
  • \n

    \n HOST_NOT_FOUND – The queried hostname does not exist or was\n not reachable at the time of the request. Ensure that you have published the\n required DNS record(s).

    \n
  • \n
  • \n

    \n SERVICE_ERROR – A temporary issue is preventing Amazon SES from\n determining the verification status of the domain.

    \n
  • \n
  • \n

    \n DNS_SERVER_ERROR – The DNS server encountered an issue and\n was unable to complete the request.

    \n
  • \n
" + "smithy.api#documentation": "

Provides the reason for the failure describing why Amazon SES was not able to successfully\n verify the identity. Below are the possible values:

\n
    \n
  • \n

    \n INVALID_VALUE – Amazon SES was able to find the record, but the\n value contained within the record was invalid. Ensure you have published the\n correct values for the record.

    \n
  • \n
  • \n

    \n TYPE_NOT_FOUND – The queried hostname exists but does not\n have the requested type of DNS record. Ensure that you have published the\n correct type of DNS record.

    \n
  • \n
  • \n

    \n HOST_NOT_FOUND – The queried hostname does not exist or was\n not reachable at the time of the request. Ensure that you have published the\n required DNS record(s).

    \n
  • \n
  • \n

    \n SERVICE_ERROR – A temporary issue is preventing Amazon SES from\n determining the verification status of the domain.

    \n
  • \n
  • \n

    \n DNS_SERVER_ERROR – The DNS server encountered an issue and\n was unable to complete the request.

    \n
  • \n
  • \n

    \n REPLICATION_ACCESS_DENIED – The verification failed because the user does not\n have the required permissions to replicate the DKIM key from the primary region. Ensure you have the\n necessary permissions in both primary and replica regions.\n

    \n
  • \n
  • \n

    \n REPLICATION_PRIMARY_NOT_FOUND – The verification failed because no corresponding\n identity was found in the specified primary region. Ensure the identity exists in the primary region\n before attempting replication.\n

    \n
  • \n
  • \n

    \n REPLICATION_PRIMARY_BYO_DKIM_NOT_SUPPORTED – The verification failed because the\n identity in the primary region is configured with Bring Your Own DKIM (BYODKIM). DKIM key\n replication is only supported for identities using Easy DKIM.\n

    \n
  • \n
  • \n

    \n REPLICATION_REPLICA_AS_PRIMARY_NOT_SUPPORTED – The verification failed because\n the specified primary identity is a replica of another identity, and multi-level replication is not\n supported; the primary identity must be a non-replica identity.\n

    \n
  • \n
  • \n

    \n REPLICATION_PRIMARY_INVALID_REGION – The verification failed due to an invalid\n primary region specified. Ensure you provide a valid AWS region where Amazon SES is available and different\n from the replica region.\n

    \n
  • \n
" } }, "SOARecord": { diff --git a/models/synthetics.json b/models/synthetics.json index 571fa4c4a0..940783ca85 100644 --- a/models/synthetics.json +++ b/models/synthetics.json @@ -314,7 +314,7 @@ "min": 1, "max": 2048 }, - "smithy.api#pattern": "^arn:(aws[a-zA-Z-]*)?:synthetics:[a-z]{2}((-gov)|(-iso(b?)))?-[a-z]+-\\d{1}:\\d{12}:canary:[0-9a-z_\\-]{1,255}$" + "smithy.api#pattern": "^arn:(aws[a-zA-Z-]*)?:synthetics:[a-z]{2,4}(-[a-z]{2,4})?-[a-z]+-\\d{1}:\\d{12}:canary:[0-9a-z_\\-]{1,255}$" } }, "com.amazonaws.synthetics#CanaryCodeInput": { @@ -1526,7 +1526,7 @@ "min": 1, "max": 2048 }, - "smithy.api#pattern": "^arn:(aws[a-zA-Z-]*)?:lambda:[a-z]{2}((-gov)|(-iso(b?)))?-[a-z]+-\\d{1}:\\d{12}:function:[a-zA-Z0-9-_]+(:(\\$LATEST|[a-zA-Z0-9-_]+))?$" + "smithy.api#pattern": "^arn:(aws[a-zA-Z-]*)?:lambda:[a-z]{2,4}(-[a-z]{2,4})?-[a-z]+-\\d{1}:\\d{12}:function:[a-zA-Z0-9-_]+(:(\\$LATEST|[a-zA-Z0-9-_]+))?$" } }, "com.amazonaws.synthetics#GetCanary": { @@ -1777,7 +1777,7 @@ "min": 1, "max": 128 }, - "smithy.api#pattern": "^arn:(aws[a-zA-Z-]*)?:synthetics:[a-z]{2}((-gov)|(-iso(b?)))?-[a-z]+-\\d{1}:\\d{12}:group:[0-9a-z]+$" + "smithy.api#pattern": "^arn:(aws[a-zA-Z-]*)?:synthetics:[a-z]{2,4}(-[a-z]{2,4})?-[a-z]+-\\d{1}:\\d{12}:group:[0-9a-z]+$" } }, "com.amazonaws.synthetics#GroupIdentifier": { @@ -1863,7 +1863,7 @@ "min": 1, "max": 2048 }, - "smithy.api#pattern": "^arn:(aws[a-zA-Z-]*)?:kms:[a-z]{2}((-gov)|(-iso(b?)))?-[a-z]+-\\d{1}:\\d{12}:key/[\\w\\-\\/]+$" + "smithy.api#pattern": "^arn:(aws[a-zA-Z-]*)?:kms:[a-z]{2,4}(-[a-z]{2,4})?-[a-z]+-\\d{1}:\\d{12}:key/[\\w\\-\\/]+$" } }, "com.amazonaws.synthetics#ListAssociatedGroups": { @@ -2291,7 +2291,7 @@ "min": 1, "max": 2048 }, - "smithy.api#pattern": "^arn:(aws[a-zA-Z-]*)?:synthetics:[a-z]{2}((-gov)|(-iso(b?)))?-[a-z]+-\\d{1}:\\d{12}:(canary|group):[0-9a-z_\\-]+$" + "smithy.api#pattern": "^arn:(aws[a-zA-Z-]*)?:synthetics:[a-z]{2,4}(-[a-z]{2,4})?-[a-z]+-\\d{1}:\\d{12}:(canary|group):[0-9a-z_\\-]+$" } }, 
"com.amazonaws.synthetics#ResourceList": { @@ -3987,7 +3987,7 @@ "BaseCanaryRunId": { "target": "com.amazonaws.synthetics#String", "traits": { - "smithy.api#documentation": "

Specifies which canary run to use the screenshots from as the baseline for future visual monitoring with this canary. Valid values are \n nextrun to use the screenshots from the next run after this update is made, lastrun to use the screenshots from the most recent run \n before this update was made, or the value of Id in the \n CanaryRun from any past run of this canary.

", + "smithy.api#documentation": "

Specifies which canary run to use the screenshots from as the baseline for future visual monitoring with this canary. Valid values are \n nextrun to use the screenshots from the next run after this update is made, lastrun to use the screenshots from the most recent run \n before this update was made, or the value of Id in the \n CanaryRun from a run of this canary in the past 31 days. If you specify the Id of a canary run older than 31 days, \n the operation returns a 400 validation exception error.

", "smithy.api#required": {} } } @@ -4030,6 +4030,12 @@ "traits": { "smithy.api#documentation": "

The IDs of the security groups for this canary.

" } + }, + "Ipv6AllowedForDualStack": { + "target": "com.amazonaws.synthetics#NullableBoolean", + "traits": { + "smithy.api#documentation": "

Set this to true to allow outbound IPv6 traffic on VPC canaries that are connected to dual-stack subnets. The default is false.\n

" + } } }, "traits": { @@ -4056,6 +4062,12 @@ "traits": { "smithy.api#documentation": "

The IDs of the security groups for this canary.

" } + }, + "Ipv6AllowedForDualStack": { + "target": "com.amazonaws.synthetics#NullableBoolean", + "traits": { + "smithy.api#documentation": "

Indicates whether this canary allows outbound IPv6 traffic if it is connected to dual-stack subnets.

" + } } }, "traits": { diff --git a/models/timestream-influxdb.json b/models/timestream-influxdb.json index b7ca551f5c..5c6f1684eb 100644 --- a/models/timestream-influxdb.json +++ b/models/timestream-influxdb.json @@ -80,7 +80,7 @@ "*,authorization,date,x-amz-date,x-amz-security-token,x-amz-target,content-type,x-amz-content-sha256,x-amz-user-agent,x-amzn-platform-id,x-amzn-trace-id,amz-sdk-invocation-id,amz-sdk-request" ] }, - "smithy.api#documentation": "

Amazon Timestream for InfluxDB is a managed time-series database engine that makes it easy for application developers and DevOps teams to run InfluxDB databases on AWS for near real-time time-series applications using open-source APIs. With Amazon Timestream for InfluxDB, it is easy to set up, operate, and scale time-series workloads that can answer queries with single-digit millisecond query response time.

", + "smithy.api#documentation": "

Amazon Timestream for InfluxDB is a managed time-series database engine that makes it easy for application developers and DevOps teams to run InfluxDB databases on Amazon Web Services for near real-time time-series applications using open-source APIs. With Amazon Timestream for InfluxDB, it is easy to set up, operate, and scale time-series workloads that can answer queries with single-digit millisecond query response time.

", "smithy.api#title": "Timestream InfluxDB", "smithy.rules#endpointRuleSet": { "version": "1.0", @@ -860,7 +860,7 @@ "password": { "target": "com.amazonaws.timestreaminfluxdb#Password", "traits": { - "smithy.api#documentation": "

The password of the initial admin user created in InfluxDB. This password will allow you to access the InfluxDB UI to perform various administrative tasks and also use the InfluxDB CLI to create an operator token. These attributes will be stored in a Secret created in AWS SecretManager in your account.

", + "smithy.api#documentation": "

The password of the initial admin user created in InfluxDB. This password will allow you to access the InfluxDB UI to perform various administrative tasks and also use the InfluxDB CLI to create an operator token. These attributes will be stored in a Secret created in Amazon Web Services Secrets Manager in your account.

", "smithy.api#required": {} } }, @@ -946,6 +946,12 @@ "smithy.api#default": 8086, "smithy.api#documentation": "

The port number on which InfluxDB accepts connections.

\n

Valid Values: 1024-65535

\n

Default: 8086

\n

Constraints: The value can't be 2375-2376, 7788-7799, 8090, or 51678-51680

" } + }, + "networkType": { + "target": "com.amazonaws.timestreaminfluxdb#NetworkType", + "traits": { + "smithy.api#documentation": "

Specifies whether the networkType of the Timestream for InfluxDB instance is IPV4, which can communicate over IPv4 protocol only, or DUAL, which can communicate over both IPv4 and IPv6 protocols.

" + } } }, "traits": { @@ -999,6 +1005,12 @@ "smithy.api#documentation": "

The port number on which InfluxDB accepts connections. The default value is 8086.

" } }, + "networkType": { + "target": "com.amazonaws.timestreaminfluxdb#NetworkType", + "traits": { + "smithy.api#documentation": "

Specifies whether the networkType of the Timestream for InfluxDB instance is IPV4, which can communicate over IPv4 protocol only, or DUAL, which can communicate over both IPv4 and IPv6 protocols.

" + } + }, "dbInstanceType": { "target": "com.amazonaws.timestreaminfluxdb#DbInstanceType", "traits": { @@ -1069,7 +1081,7 @@ "influxAuthParametersSecretArn": { "target": "smithy.api#String", "traits": { - "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the AWS Secrets Manager secret containing the initial InfluxDB authorization parameters. The secret value is a JSON formatted key-value pair holding InfluxDB authorization values: organization, bucket, username, and password.

" + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the Amazon Web Services Secrets Manager secret containing the initial InfluxDB authorization parameters. The secret value is a JSON formatted key-value pair holding InfluxDB authorization values: organization, bucket, username, and password.

" } } }, @@ -1272,7 +1284,7 @@ "name": { "target": "com.amazonaws.timestreaminfluxdb#DbInstanceName", "traits": { - "smithy.api#documentation": "

This customer-supplied name uniquely identifies the DB instance when interacting with the Amazon Timestream for InfluxDB API and AWS CLI commands.

", + "smithy.api#documentation": "

This customer-supplied name uniquely identifies the DB instance when interacting with the Amazon Timestream for InfluxDB API and Amazon Web Services CLI commands.

", "smithy.api#required": {} } }, @@ -1301,6 +1313,12 @@ "smithy.api#documentation": "

The port number on which InfluxDB accepts connections.

" } }, + "networkType": { + "target": "com.amazonaws.timestreaminfluxdb#NetworkType", + "traits": { + "smithy.api#documentation": "

Specifies whether the networkType of the Timestream for InfluxDB instance is IPV4, which can communicate over IPv4 protocol only, or DUAL, which can communicate over both IPv4 and IPv6 protocols.

" + } + }, "dbInstanceType": { "target": "com.amazonaws.timestreaminfluxdb#DbInstanceType", "traits": { @@ -1602,6 +1620,12 @@ "smithy.api#documentation": "

The port number on which InfluxDB accepts connections.

" } }, + "networkType": { + "target": "com.amazonaws.timestreaminfluxdb#NetworkType", + "traits": { + "smithy.api#documentation": "

Specifies whether the networkType of the Timestream for InfluxDB instance is IPV4, which can communicate over IPv4 protocol only, or DUAL, which can communicate over both IPv4 and IPv6 protocols.

" + } + }, "dbInstanceType": { "target": "com.amazonaws.timestreaminfluxdb#DbInstanceType", "traits": { @@ -1672,7 +1696,7 @@ "influxAuthParametersSecretArn": { "target": "smithy.api#String", "traits": { - "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the AWS Secrets Manager secret containing the initial InfluxDB authorization parameters. The secret value is a JSON formatted key-value pair holding InfluxDB authorization values: organization, bucket, username, and password.

" + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the Amazon Web Services Secrets Manager secret containing the initial InfluxDB authorization parameters. The secret value is a JSON formatted key-value pair holding InfluxDB authorization values: organization, bucket, username, and password.

" } } }, @@ -1842,6 +1866,12 @@ "smithy.api#documentation": "

The port number on which InfluxDB accepts connections.

" } }, + "networkType": { + "target": "com.amazonaws.timestreaminfluxdb#NetworkType", + "traits": { + "smithy.api#documentation": "

Specifies whether the networkType of the Timestream for InfluxDB instance is IPV4, which can communicate over IPv4 protocol only, or DUAL, which can communicate over both IPv4 and IPv6 protocols.

" + } + }, "dbInstanceType": { "target": "com.amazonaws.timestreaminfluxdb#DbInstanceType", "traits": { @@ -1912,7 +1942,7 @@ "influxAuthParametersSecretArn": { "target": "smithy.api#String", "traits": { - "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the AWS Secrets Manager secret containing the initial InfluxDB authorization parameters. The secret value is a JSON formatted key-value pair holding InfluxDB authorization values: organization, bucket, username, and password.

" + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the Amazon Web Services Secrets Manager secret containing the initial InfluxDB authorization parameters. The secret value is a JSON formatted key-value pair holding InfluxDB authorization values: organization, bucket, username, and password.

" } } }, @@ -2578,6 +2608,23 @@ } } }, + "com.amazonaws.timestreaminfluxdb#NetworkType": { + "type": "enum", + "members": { + "IPV4": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "IPV4" + } + }, + "DUAL": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "DUAL" + } + } + } + }, "com.amazonaws.timestreaminfluxdb#NextToken": { "type": "string", "traits": { @@ -2822,6 +2869,9 @@ "errors": [ { "target": "com.amazonaws.timestreaminfluxdb#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.timestreaminfluxdb#ServiceQuotaExceededException" } ], "traits": { @@ -3080,7 +3130,7 @@ "name": { "target": "com.amazonaws.timestreaminfluxdb#DbInstanceName", "traits": { - "smithy.api#documentation": "

This customer-supplied name uniquely identifies the DB instance when interacting with the Amazon Timestream for InfluxDB API and AWS CLI commands.

", + "smithy.api#documentation": "

This customer-supplied name uniquely identifies the DB instance when interacting with the Amazon Timestream for InfluxDB API and Amazon Web Services CLI commands.

", "smithy.api#required": {} } }, @@ -3109,6 +3159,12 @@ "smithy.api#documentation": "

The port number on which InfluxDB accepts connections.

" } }, + "networkType": { + "target": "com.amazonaws.timestreaminfluxdb#NetworkType", + "traits": { + "smithy.api#documentation": "

Specifies whether the networkType of the Timestream for InfluxDB instance is IPV4, which can communicate over IPv4 protocol only, or DUAL, which can communicate over both IPv4 and IPv6 protocols.

" + } + }, "dbInstanceType": { "target": "com.amazonaws.timestreaminfluxdb#DbInstanceType", "traits": { @@ -3179,7 +3235,7 @@ "influxAuthParametersSecretArn": { "target": "smithy.api#String", "traits": { - "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the AWS Secrets Manager secret containing the initial InfluxDB authorization parameters. The secret value is a JSON formatted key-value pair holding InfluxDB authorization values: organization, bucket, username, and password.

" + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the Amazon Web Services Secrets Manager secret containing the initial InfluxDB authorization parameters. The secret value is a JSON formatted key-value pair holding InfluxDB authorization values: organization, bucket, username, and password.

" } } }, diff --git a/models/workspaces.json b/models/workspaces.json index bc58c84cf0..8d64aa31b1 100644 --- a/models/workspaces.json +++ b/models/workspaces.json @@ -11152,7 +11152,7 @@ "UserName": { "target": "com.amazonaws.workspaces#UserName", "traits": { - "smithy.api#documentation": "

The user name of the user for the WorkSpace. This user name must exist in the Directory Service directory for the WorkSpace.

\n

The reserved keyword, [UNDEFINED], is used when creating user-decoupled WorkSpaces.

", + "smithy.api#documentation": "

The user name of the user for the WorkSpace. This user name must exist in the Directory Service directory for the WorkSpace.

\n

The username is not case-sensitive, but we recommend matching the case in the Directory Service directory to avoid potential incompatibilities.

\n

The reserved keyword, [UNDEFINED], is used when creating user-decoupled WorkSpaces.

", "smithy.api#required": {} } },